/*
 * drivers/gpu/drm/i915/intel_display.c (excerpt)
 * Commit context: "drm/i915: Make intel_set_pipe_timings/src_size take a
 * pointer to crtc_state". Git web-interface navigation text removed.
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <drm/drm_atomic_uapi.h>
50 #include <linux/dma_remapping.h>
51 #include <linux/reservation.h>
52
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/*
 * Tiling modifiers for pre-SKL primary planes. Modifier lists must be
 * terminated with the DRM_FORMAT_MOD_INVALID sentinel.
 */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ primary plane formats: adds alpha and packed YUV variants */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* skl_primary_formats plus NV12, for planes that support planar YUV */
static const uint32_t skl_pri_planar_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV12,
};

/* SKL+ modifiers for planes without CCS (compression) support */
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ modifiers including the CCS (compressed) tiling variants */
static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear buffers */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
135
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
167
/*
 * Min/max ranges for each DPLL divider and for the resulting clocks,
 * used by the *_find_best_dpll() routines to validate candidate divider
 * combinations (see intel_PLL_is_valid()). Clock ranges are in kHz.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 post divider selection: p2_slow applies when the target dot
	 * clock is below dot_limit, p2_fast otherwise. For LVDS the choice
	 * is by single vs. dual channel instead — see i9xx_select_p2_div().
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
178
179 /* returns HPLL frequency in kHz */
180 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
181 {
182         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
183
184         /* Obtain SKU information */
185         mutex_lock(&dev_priv->sb_lock);
186         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
187                 CCK_FUSE_HPLL_FREQ_MASK;
188         mutex_unlock(&dev_priv->sb_lock);
189
190         return vco_freq[hpll_freq] * 1000;
191 }
192
193 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
194                       const char *name, u32 reg, int ref_freq)
195 {
196         u32 val;
197         int divider;
198
199         mutex_lock(&dev_priv->sb_lock);
200         val = vlv_cck_read(dev_priv, reg);
201         mutex_unlock(&dev_priv->sb_lock);
202
203         divider = val & CCK_FREQUENCY_VALUES;
204
205         WARN((val & CCK_FREQUENCY_STATUS) !=
206              (divider << CCK_FREQUENCY_STATUS_SHIFT),
207              "%s change in progress\n", name);
208
209         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
210 }
211
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213                            const char *name, u32 reg)
214 {
215         if (dev_priv->hpll_freq == 0)
216                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
217
218         return vlv_get_cck_clock(dev_priv, name, reg,
219                                  dev_priv->hpll_freq);
220 }
221
222 static void intel_update_czclk(struct drm_i915_private *dev_priv)
223 {
224         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
225                 return;
226
227         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
228                                                       CCK_CZ_CLOCK_CONTROL);
229
230         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
231 }
232
233 static inline u32 /* units of 100MHz */
234 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
235                     const struct intel_crtc_state *pipe_config)
236 {
237         if (HAS_DDI(dev_priv))
238                 return pipe_config->port_clock; /* SPLL */
239         else
240                 return dev_priv->fdi_pll_freq;
241 }
242
/*
 * DPLL divider limits per platform/output type. dot/vco ranges are in
 * kHz; the p2 post divider is chosen by i9xx_select_p2_div() (slow below
 * .dot_limit, fast above — or by LVDS channel count).
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Same as the DAC limits except both p2 dividers are 4. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	/* for LVDS, p2_slow/fast mean single/dual channel */
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
307
308
/* G4x DPLL limits, per output type. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* dot_limit = 0: p2 is the same regardless of dot clock */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
392
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
463
/* Note: no .m/.p ranges here — intel_PLL_is_valid() skips them on VLV. */
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits (see chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
507
508 static void
509 skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
510 {
511         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
512                 return;
513
514         if (enable)
515                 I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
516         else
517                 I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
518 }
519
520 static void
521 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
522 {
523         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
524                 return;
525
526         if (enable)
527                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
529         else
530                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
531                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
532                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
533 }
534
/*
 * Shorthand for drm_atomic_crtc_needs_modeset(): true when this CRTC
 * state requires a full modeset rather than a plane-only/fastset update.
 */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
540
541 /*
542  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
543  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
544  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
545  * The helpers' return value is the rate of the clock that is fed to the
546  * display engine's pipe which can be the above fast dot clock rate or a
547  * divided-down version of it.
548  */
549 /* m1 is reserved as 0 in Pineview, n is a ring counter */
550 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
551 {
552         clock->m = clock->m2 + 2;
553         clock->p = clock->p1 * clock->p2;
554         if (WARN_ON(clock->n == 0 || clock->p == 0))
555                 return 0;
556         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
557         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
558
559         return clock->dot;
560 }
561
562 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
563 {
564         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
565 }
566
567 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
568 {
569         clock->m = i9xx_dpll_compute_m(clock);
570         clock->p = clock->p1 * clock->p2;
571         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
572                 return 0;
573         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
574         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
575
576         return clock->dot;
577 }
578
579 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
580 {
581         clock->m = clock->m1 * clock->m2;
582         clock->p = clock->p1 * clock->p2;
583         if (WARN_ON(clock->n == 0 || clock->p == 0))
584                 return 0;
585         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
586         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
587
588         return clock->dot / 5;
589 }
590
591 int chv_calc_dpll_params(int refclk, struct dpll *clock)
592 {
593         clock->m = clock->m1 * clock->m2;
594         clock->p = clock->p1 * clock->p2;
595         if (WARN_ON(clock->n == 0 || clock->p == 0))
596                 return 0;
597         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
598                         clock->n << 22);
599         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
600
601         return clock->dot / 5;
602 }
603
/*
 * Reject the current candidate: expands to a "return false" from the
 * ENCLOSING function (hidden control flow — only meaningful inside
 * intel_PLL_is_valid() below).
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is required except on platforms with a single m divider */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* the VLV/CHV/BXT limit tables don't define overall m/p ranges */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
646
647 static int
648 i9xx_select_p2_div(const struct intel_limit *limit,
649                    const struct intel_crtc_state *crtc_state,
650                    int target)
651 {
652         struct drm_device *dev = crtc_state->base.crtc->dev;
653
654         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
655                 /*
656                  * For LVDS just rely on its current settings for dual-channel.
657                  * We haven't figured out how to reliably set up different
658                  * single/dual channel state, if we even can.
659                  */
660                 if (intel_is_dual_link_lvds(dev))
661                         return limit->p2.p2_fast;
662                 else
663                         return limit->p2.p2_slow;
664         } else {
665                 if (target < limit->p2.dot_limit)
666                         return limit->p2.p2_slow;
667                 else
668                         return limit->p2.p2_fast;
669         }
670 }
671
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* best error so far, seeded so any valid candidate improves on it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1 (p2 was fixed above). */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 > m2 is required (see intel_PLL_is_valid()) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* strict '<': earlier candidates win ties */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
729
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* best error so far, seeded so any valid candidate improves on it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without the
	 * m1 > m2 early break: Pineview has a single m divider (m1 is 0).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* strict '<': earlier candidates win ties */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
785
786 /*
787  * Returns a set of divisors for the desired target clock with the given
788  * refclk, or FALSE.  The returned values represent the clock equation:
789  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
790  *
791  * Target and reference clocks are specified in kHz.
792  *
793  * If match_clock is provided, then best_clock P divider must match the P
794  * divider from @match_clock used for LVDS downclocking.
795  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        /* NOTE(review): @match_clock is accepted but never checked here,
         * unlike in the other *_find_best_dpll() variants. */
        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* Clamp the n search: later
                                                 * iterations may only win with
                                                 * an equal-or-smaller n. */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
844
845 /*
846  * Check if the calculated PLL configuration is more optimal compared to the
847  * best configuration and error found so far. Return the calculated error.
848  */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
                               const struct dpll *calculated_clock,
                               const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
{
        /*
         * For CHV ignore the error and consider only the P value.
         * Prefer a bigger P value based on HW requirements.
         */
        if (IS_CHERRYVIEW(to_i915(dev))) {
                *error_ppm = 0;

                return calculated_clock->p > best_clock->p;
        }

        /* Guard the division below; no valid target means no winner. */
        if (WARN_ON_ONCE(!target_freq))
                return false;

        /* Deviation from the target frequency in parts per million. */
        *error_ppm = div_u64(1000000ULL *
                                abs(target_freq - calculated_clock->dot),
                             target_freq);
        /*
         * Prefer a better P value over a better (smaller) error if the error
         * is small. Ensure this preference for future configurations too by
         * setting the error to 0.
         */
        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
                *error_ppm = 0;

                return true;
        }

        /* Require a meaningful (>10 ppm) improvement before switching. */
        return *error_ppm + 10 < best_error_ppm;
}
884
885 /*
886  * Returns a set of divisors for the desired target clock with the given
887  * refclk, or FALSE.  The returned values represent the clock equation:
888  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
889  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        /* NOTE(review): @match_clock is not used on VLV. */
        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        /* Solve for m2 given all other divisors. */
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        /* Accept only candidates that beat the
                                         * current best (error or P-value wise). */
                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
944
945 /*
946  * Returns a set of divisors for the desired target clock with the given
947  * refclk, or FALSE.  The returned values represent the clock equation:
948  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
949  */
950 static bool
951 chv_find_best_dpll(const struct intel_limit *limit,
952                    struct intel_crtc_state *crtc_state,
953                    int target, int refclk, struct dpll *match_clock,
954                    struct dpll *best_clock)
955 {
956         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
957         struct drm_device *dev = crtc->base.dev;
958         unsigned int best_error_ppm;
959         struct dpll clock;
960         uint64_t m2;
961         int found = false;
962
963         memset(best_clock, 0, sizeof(*best_clock));
964         best_error_ppm = 1000000;
965
966         /*
967          * Based on hardware doc, the n always set to 1, and m1 always
968          * set to 2.  If requires to support 200Mhz refclk, we need to
969          * revisit this because n may not 1 anymore.
970          */
971         clock.n = 1, clock.m1 = 2;
972         target *= 5;    /* fast clock */
973
974         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
975                 for (clock.p2 = limit->p2.p2_fast;
976                                 clock.p2 >= limit->p2.p2_slow;
977                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
978                         unsigned int error_ppm;
979
980                         clock.p = clock.p1 * clock.p2;
981
982                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
983                                         clock.n) << 22, refclk * clock.m1);
984
985                         if (m2 > INT_MAX/clock.m1)
986                                 continue;
987
988                         clock.m2 = m2;
989
990                         chv_calc_dpll_params(refclk, &clock);
991
992                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
993                                 continue;
994
995                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
996                                                 best_error_ppm, &error_ppm))
997                                 continue;
998
999                         *best_clock = clock;
1000                         best_error_ppm = error_ppm;
1001                         found = true;
1002                 }
1003         }
1004
1005         return found;
1006 }
1007
1008 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1009                         struct dpll *best_clock)
1010 {
1011         int refclk = 100000;
1012         const struct intel_limit *limit = &intel_limits_bxt;
1013
1014         return chv_find_best_dpll(limit, crtc_state,
1015                                   target_clock, refclk, NULL, best_clock);
1016 }
1017
1018 bool intel_crtc_active(struct intel_crtc *crtc)
1019 {
1020         /* Be paranoid as we can arrive here with only partial
1021          * state retrieved from the hardware during setup.
1022          *
1023          * We can ditch the adjusted_mode.crtc_clock check as soon
1024          * as Haswell has gained clock readout/fastboot support.
1025          *
1026          * We can ditch the crtc->primary->state->fb check as soon as we can
1027          * properly reconstruct framebuffers.
1028          *
1029          * FIXME: The intel_crtc->active here should be switched to
1030          * crtc->state->active once we have proper CRTC states wired up
1031          * for atomic.
1032          */
1033         return crtc->active && crtc->base.primary->state->fb &&
1034                 crtc->config->base.adjusted_mode.crtc_clock;
1035 }
1036
1037 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1038                                              enum pipe pipe)
1039 {
1040         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1041
1042         return crtc->config->cpu_transcoder;
1043 }
1044
1045 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1046                                     enum pipe pipe)
1047 {
1048         i915_reg_t reg = PIPEDSL(pipe);
1049         u32 line1, line2;
1050         u32 line_mask;
1051
1052         if (IS_GEN2(dev_priv))
1053                 line_mask = DSL_LINEMASK_GEN2;
1054         else
1055                 line_mask = DSL_LINEMASK_GEN3;
1056
1057         line1 = I915_READ(reg) & line_mask;
1058         msleep(5);
1059         line2 = I915_READ(reg) & line_mask;
1060
1061         return line1 != line2;
1062 }
1063
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        /* Poll (up to 100 ms) until the scanline counter is moving
         * (@state == true) or has stopped (@state == false). */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1074
/* Wait until the pipe's scanline counter stops moving (pipe shut down). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1079
/* Wait until the pipe's scanline counter starts moving (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1084
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        /* Block until the pipe described by @old_crtc_state has fully
         * shut down after being disabled. */
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(dev_priv,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                /* Pre-gen4: no I965_PIPECONF_ACTIVE bit to poll, so
                 * watch the scanline counter instead. */
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1104
1105 /* Only for pre-ILK configs */
1106 void assert_pll(struct drm_i915_private *dev_priv,
1107                 enum pipe pipe, bool state)
1108 {
1109         u32 val;
1110         bool cur_state;
1111
1112         val = I915_READ(DPLL(pipe));
1113         cur_state = !!(val & DPLL_VCO_ENABLE);
1114         I915_STATE_WARN(cur_state != state,
1115              "PLL state assertion failure (expected %s, current %s)\n",
1116                         onoff(state), onoff(cur_state));
1117 }
1118
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* The DSI PLL control register is accessed over the CCK sideband,
         * which requires holding sb_lock. */
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        mutex_unlock(&dev_priv->sb_lock);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1134
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        /* Warn if the FDI transmitter for @pipe is not in @state. */
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1154 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1155 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1156
1157 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1158                           enum pipe pipe, bool state)
1159 {
1160         u32 val;
1161         bool cur_state;
1162
1163         val = I915_READ(FDI_RX_CTL(pipe));
1164         cur_state = !!(val & FDI_RX_ENABLE);
1165         I915_STATE_WARN(cur_state != state,
1166              "FDI RX state assertion failure (expected %s, current %s)\n",
1167                         onoff(state), onoff(cur_state));
1168 }
1169 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1170 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1171
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        /* Warn if the FDI TX PLL for @pipe is not enabled (where the
         * driver is responsible for it at all). */
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN5(dev_priv))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1188
1189 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1190                        enum pipe pipe, bool state)
1191 {
1192         u32 val;
1193         bool cur_state;
1194
1195         val = I915_READ(FDI_RX_CTL(pipe));
1196         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1197         I915_STATE_WARN(cur_state != state,
1198              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1199                         onoff(state), onoff(cur_state));
1200 }
1201
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        /* Warn if the panel power sequencer registers for the panel on
         * @pipe are still write-locked. */
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* Not applicable on DDI platforms -- warn and bail if called there. */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Figure out which pipe drives the port the panel power
                 * sequencer is hooked up to. */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                /* Pre-PCH-split platforms: only LVDS can be panel-selected. */
                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        /* Registers count as unlocked when the panel is off or the
         * unlock key is present in the control register. */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1258
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        /* Warn if @pipe is not in the expected @state. */
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        /* Only read PIPECONF if the transcoder's power well is up; a
         * powered-down transcoder is treated as disabled. */
        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1285
1286 static void assert_plane(struct intel_plane *plane, bool state)
1287 {
1288         enum pipe pipe;
1289         bool cur_state;
1290
1291         cur_state = plane->get_hw_state(plane, &pipe);
1292
1293         I915_STATE_WARN(cur_state != state,
1294                         "%s assertion failure (expected %s, current %s)\n",
1295                         plane->base.name, onoff(state), onoff(cur_state));
1296 }
1297
1298 #define assert_plane_enabled(p) assert_plane(p, true)
1299 #define assert_plane_disabled(p) assert_plane(p, false)
1300
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        /* Warn if any plane attached to @crtc is still enabled. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1309
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        /* drm_crtc_vblank_get() returning 0 means a vblank reference was
         * successfully taken, i.e. vblanks were NOT disabled -- warn and
         * drop the reference we just acquired. */
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1315
1316 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1317                                     enum pipe pipe)
1318 {
1319         u32 val;
1320         bool enabled;
1321
1322         val = I915_READ(PCH_TRANSCONF(pipe));
1323         enabled = !!(val & TRANS_ENABLE);
1324         I915_STATE_WARN(enabled,
1325              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1326              pipe_name(pipe));
1327 }
1328
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        /* Warn if the PCH DP port is enabled on @pipe's transcoder. */
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        /* On IBX, even a disabled port must not be left selecting
         * transcoder B. */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1346
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        /* Warn if the PCH HDMI port is enabled on @pipe's transcoder. */
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        /* On IBX, even a disabled port must not be left selecting
         * transcoder B. */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1364
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        /* Warn if any PCH output port (DP, VGA, LVDS, HDMI) is still
         * enabled on @pipe's transcoder. */
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1388
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        /* Write the precomputed DPLL value and wait for the PLL to lock. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* Poll (up to 1 ms) for the lock bit. */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1406
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        /* Enable the DPLL for @crtc on VLV, then program DPLL_MD. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        /* Only power up the VCO if the precomputed state wants it on. */
        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1424
1425
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        /* Enable the CHV DPLL: switch on the 10bit clock via DPIO
         * sideband, then enable the PLL and wait for lock. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1457
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        /* Enable the DPLL for @crtc on CHV and program DPLL_MD, working
         * around the missing DPLLCMD register on pipes B/C. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        /* Only power up the VCO if the precomputed state wants it on. */
        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1494
1495 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1496 {
1497         struct intel_crtc *crtc;
1498         int count = 0;
1499
1500         for_each_intel_crtc(&dev_priv->drm, crtc) {
1501                 count += crtc->base.state->active &&
1502                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1503         }
1504
1505         return count;
1506 }
1507
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        /* Program and enable the DPLL for @crtc on gen2-gen4 platforms. */
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        /* The pipe must be off while its PLL is being reprogrammed. */
        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneously.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1567
/*
 * Disable the pipe's DPLL on gen2-4 style hardware, tearing down the
 * shared DVO 2x clock first when this was the last DVO user.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
            intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Leave only VGA mode disable set; everything else off. */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1593
1594 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1595 {
1596         u32 val;
1597
1598         /* Make sure the pipe isn't still relying on us */
1599         assert_pipe_disabled(dev_priv, pipe);
1600
1601         val = DPLL_INTEGRATED_REF_CLK_VLV |
1602                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1603         if (pipe != PIPE_A)
1604                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1605
1606         I915_WRITE(DPLL(pipe), val);
1607         POSTING_READ(DPLL(pipe));
1608 }
1609
/*
 * Disable a CHV pipe PLL: turn off the PLL itself via the DPLL register,
 * then drop the 10bit display clock through the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Keep ref clocks (and CRI clock on pipes B/C) alive. */
        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        /* Sideband access is serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        mutex_unlock(&dev_priv->sb_lock);
}
1635
/*
 * Wait (up to 1 ms) for a VLV/CHV digital port to report ready in the
 * appropriate status register, warning on timeout. Port B/C status lives
 * in DPLL(0) (port C shifted by 4), port D in DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                /* Port C ready bits sit 4 bits above port B's. */
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                /* No other ports exist on these platforms. */
                BUG();
        }

        if (intel_wait_for_register(dev_priv,
                                    dpll_reg, port_mask, expected_mask,
                                    1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->base.port),
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1668
1669 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1670                                            enum pipe pipe)
1671 {
1672         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1673                                                                 pipe);
1674         i915_reg_t reg;
1675         uint32_t val, pipeconf_val;
1676
1677         /* Make sure PCH DPLL is enabled */
1678         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1679
1680         /* FDI must be feeding us bits for PCH ports */
1681         assert_fdi_tx_enabled(dev_priv, pipe);
1682         assert_fdi_rx_enabled(dev_priv, pipe);
1683
1684         if (HAS_PCH_CPT(dev_priv)) {
1685                 /* Workaround: Set the timing override bit before enabling the
1686                  * pch transcoder. */
1687                 reg = TRANS_CHICKEN2(pipe);
1688                 val = I915_READ(reg);
1689                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1690                 I915_WRITE(reg, val);
1691         }
1692
1693         reg = PCH_TRANSCONF(pipe);
1694         val = I915_READ(reg);
1695         pipeconf_val = I915_READ(PIPECONF(pipe));
1696
1697         if (HAS_PCH_IBX(dev_priv)) {
1698                 /*
1699                  * Make the BPC in transcoder be consistent with
1700                  * that in pipeconf reg. For HDMI we must use 8bpc
1701                  * here for both 8bpc and 12bpc.
1702                  */
1703                 val &= ~PIPECONF_BPC_MASK;
1704                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1705                         val |= PIPECONF_8BPC;
1706                 else
1707                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1708         }
1709
1710         val &= ~TRANS_INTERLACE_MASK;
1711         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1712                 if (HAS_PCH_IBX(dev_priv) &&
1713                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1714                         val |= TRANS_LEGACY_INTERLACED_ILK;
1715                 else
1716                         val |= TRANS_INTERLACED;
1717         else
1718                 val |= TRANS_PROGRESSIVE;
1719
1720         I915_WRITE(reg, val | TRANS_ENABLE);
1721         if (intel_wait_for_register(dev_priv,
1722                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1723                                     100))
1724                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1725 }
1726
/*
 * Enable the single LPT PCH transcoder, mirroring the CPU transcoder's
 * interlace mode. On LPT the transcoder is hardwired to pipe A resources.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        /* Copy interlaced vs. progressive from the CPU transcoder. */
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE,
                                    TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1758
/*
 * Disable the PCH transcoder for the given pipe and wait for it to stop.
 * FDI and the PCH ports must already be off, since they depend on it.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        uint32_t val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    reg, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1790
/*
 * Disable the LPT PCH transcoder, wait for it to report off, then undo
 * the timing-override chicken bit set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1809
1810 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1811 {
1812         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1813
1814         if (HAS_PCH_LPT(dev_priv))
1815                 return PIPE_A;
1816         else
1817                 return crtc->pipe;
1818 }
1819
/*
 * Enable the pipe for the given new crtc state. Asserts that the clock
 * sources feeding the pipe (DSI PLL, pipe PLL, or FDI) are already up,
 * then sets PIPECONF_ENABLE and optionally waits for the scanline to
 * start moving when vblank timestamps are scanline-derived.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

        assert_planes_disabled(crtc);

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (HAS_GMCH_DISPLAY(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        } else {
                if (new_crtc_state->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv,
                                                  intel_crtc_pch_transcoder(crtc));
                        assert_fdi_tx_pll_enabled(dev_priv,
                                                  (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE) {
                /* we keep both pipes enabled on 830 */
                WARN_ON(!IS_I830(dev_priv));
                return;
        }

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);

        /*
         * Until the pipe starts PIPEDSL reads will return a stale value,
         * which causes an apparent vblank timestamp jump when PIPEDSL
         * resets to its proper value. That also messes up the frame count
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
        if (dev_priv->drm.max_vblank_count == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
}
1875
/*
 * Disable the pipe described by the old crtc state. Planes must already
 * be off. On i830 the pipe is left enabled (hardware requires both pipes
 * running); only the double-wide bit is cleared there.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        /* Already disabled: nothing to do. */
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (old_crtc_state->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        /* Only wait for pipe-off when we actually cleared the enable bit. */
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
}
1913
/* Tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN2(dev_priv))
                return 2048;

        return 4096;
}
1918
1919 static unsigned int
1920 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1921 {
1922         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1923         unsigned int cpp = fb->format->cpp[color_plane];
1924
1925         switch (fb->modifier) {
1926         case DRM_FORMAT_MOD_LINEAR:
1927                 return cpp;
1928         case I915_FORMAT_MOD_X_TILED:
1929                 if (IS_GEN2(dev_priv))
1930                         return 128;
1931                 else
1932                         return 512;
1933         case I915_FORMAT_MOD_Y_TILED_CCS:
1934                 if (color_plane == 1)
1935                         return 128;
1936                 /* fall through */
1937         case I915_FORMAT_MOD_Y_TILED:
1938                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
1939                         return 128;
1940                 else
1941                         return 512;
1942         case I915_FORMAT_MOD_Yf_TILED_CCS:
1943                 if (color_plane == 1)
1944                         return 128;
1945                 /* fall through */
1946         case I915_FORMAT_MOD_Yf_TILED:
1947                 switch (cpp) {
1948                 case 1:
1949                         return 64;
1950                 case 2:
1951                 case 4:
1952                         return 128;
1953                 case 8:
1954                 case 16:
1955                         return 256;
1956                 default:
1957                         MISSING_CASE(cpp);
1958                         return cpp;
1959                 }
1960                 break;
1961         default:
1962                 MISSING_CASE(fb->modifier);
1963                 return cpp;
1964         }
1965 }
1966
1967 static unsigned int
1968 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1969 {
1970         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1971                 return 1;
1972         else
1973                 return intel_tile_size(to_i915(fb->dev)) /
1974                         intel_tile_width_bytes(fb, color_plane);
1975 }
1976
1977 /* Return the tile dimensions in pixel units */
1978 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1979                             unsigned int *tile_width,
1980                             unsigned int *tile_height)
1981 {
1982         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1983         unsigned int cpp = fb->format->cpp[color_plane];
1984
1985         *tile_width = tile_width_bytes / cpp;
1986         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1987 }
1988
/* Round a plane's height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int color_plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, color_plane));
}
1997
1998 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1999 {
2000         unsigned int size = 0;
2001         int i;
2002
2003         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2004                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2005
2006         return size;
2007 }
2008
2009 static void
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011                         const struct drm_framebuffer *fb,
2012                         unsigned int rotation)
2013 {
2014         view->type = I915_GGTT_VIEW_NORMAL;
2015         if (drm_rotation_90_or_270(rotation)) {
2016                 view->type = I915_GGTT_VIEW_ROTATED;
2017                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2018         }
2019 }
2020
/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
        if (IS_I830(dev_priv))
                return 16 * 1024;

        if (IS_I85X(dev_priv))
                return 256;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                return 32;

        return 4 * 1024;
}
2032
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return 256 * 1024;

        if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return 128 * 1024;

        if (INTEL_GEN(dev_priv) >= 4)
                return 4 * 1024;

        return 0;
}
2045
2046 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2047                                          int color_plane)
2048 {
2049         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2050
2051         /* AUX_DIST needs only 4K alignment */
2052         if (color_plane == 1)
2053                 return 4096;
2054
2055         switch (fb->modifier) {
2056         case DRM_FORMAT_MOD_LINEAR:
2057                 return intel_linear_alignment(dev_priv);
2058         case I915_FORMAT_MOD_X_TILED:
2059                 if (INTEL_GEN(dev_priv) >= 9)
2060                         return 256 * 1024;
2061                 return 0;
2062         case I915_FORMAT_MOD_Y_TILED_CCS:
2063         case I915_FORMAT_MOD_Yf_TILED_CCS:
2064         case I915_FORMAT_MOD_Y_TILED:
2065         case I915_FORMAT_MOD_Yf_TILED:
2066                 return 1 * 1024 * 1024;
2067         default:
2068                 MISSING_CASE(fb->modifier);
2069                 return 0;
2070         }
2071 }
2072
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2074 {
2075         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2077
2078         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2079 }
2080
/*
 * Pin a framebuffer's backing object into the GGTT for scanout and,
 * when requested and possible, attach a fence for tiled scanout.
 *
 * Returns the pinned vma with an extra reference held (caller releases
 * via intel_unpin_fb_vma()), or an ERR_PTR on failure. out_flags gets
 * PLANE_HAS_FENCE set when a fence was actually installed. Must be
 * called with struct_mutex held.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           const struct i915_ggtt_view *view,
                           bool uses_fence,
                           unsigned long *out_flags)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        alignment = intel_surf_alignment(fb, 0);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        intel_runtime_pm_get(dev_priv);

        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        pinctl = 0;

        /* Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
        if (HAS_GMCH_DISPLAY(dev_priv))
                pinctl |= PIN_MAPPABLE;

        vma = i915_gem_object_pin_to_display_plane(obj,
                                                   alignment, view, pinctl);
        if (IS_ERR(vma))
                goto err;

        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;

                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                ret = i915_vma_pin_fence(vma);
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
                        /* Pre-gen4 cannot scan out without a fence: unwind. */
                        i915_gem_object_unpin_from_display_plane(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }

                if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }

        /* Hand the caller a reference on the (successfully pinned) vma. */
        i915_vma_get(vma);
err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

        intel_runtime_pm_put(dev_priv);
        return vma;
}
2171
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * installed, per PLANE_HAS_FENCE in flags), unpin from the display
 * plane, and drop the vma reference. Caller holds struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_vma_put(vma);
}
2181
2182 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2183                           unsigned int rotation)
2184 {
2185         if (drm_rotation_90_or_270(rotation))
2186                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2187         else
2188                 return fb->pitches[color_plane];
2189 }
2190
2191 /*
2192  * Convert the x/y offsets into a linear offset.
2193  * Only valid with 0/180 degree rotation, which is fine since linear
2194  * offset is only used with linear buffers on pre-hsw and tiled buffers
2195  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2196  */
2197 u32 intel_fb_xy_to_linear(int x, int y,
2198                           const struct intel_plane_state *state,
2199                           int color_plane)
2200 {
2201         const struct drm_framebuffer *fb = state->base.fb;
2202         unsigned int cpp = fb->format->cpp[color_plane];
2203         unsigned int pitch = state->color_plane[color_plane].stride;
2204
2205         return y * pitch + x * cpp;
2206 }
2207
2208 /*
2209  * Add the x/y offsets derived from fb->offsets[] to the user
2210  * specified plane src x/y offsets. The resulting x/y offsets
2211  * specify the start of scanout from the beginning of the gtt mapping.
2212  */
2213 void intel_add_fb_offsets(int *x, int *y,
2214                           const struct intel_plane_state *state,
2215                           int color_plane)
2216
2217 {
2218         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2219         unsigned int rotation = state->base.rotation;
2220
2221         if (drm_rotation_90_or_270(rotation)) {
2222                 *x += intel_fb->rotated[color_plane].x;
2223                 *y += intel_fb->rotated[color_plane].y;
2224         } else {
2225                 *x += intel_fb->normal[color_plane].x;
2226                 *y += intel_fb->normal[color_plane].y;
2227         }
2228 }
2229
2230 static u32 intel_adjust_tile_offset(int *x, int *y,
2231                                     unsigned int tile_width,
2232                                     unsigned int tile_height,
2233                                     unsigned int tile_size,
2234                                     unsigned int pitch_tiles,
2235                                     u32 old_offset,
2236                                     u32 new_offset)
2237 {
2238         unsigned int pitch_pixels = pitch_tiles * tile_width;
2239         unsigned int tiles;
2240
2241         WARN_ON(old_offset & (tile_size - 1));
2242         WARN_ON(new_offset & (tile_size - 1));
2243         WARN_ON(new_offset > old_offset);
2244
2245         tiles = (old_offset - new_offset) / tile_size;
2246
2247         *y += tiles / pitch_tiles * tile_height;
2248         *x += tiles % pitch_tiles * tile_width;
2249
2250         /* minimize x in case it got needlessly big */
2251         *y += *x / pitch_pixels * tile_height;
2252         *x %= pitch_pixels;
2253
2254         return new_offset;
2255 }
2256
/*
 * Rebase an aligned offset from old_offset to new_offset, folding the
 * difference into the x/y coordinates. For tiled buffers this works in
 * whole tiles via intel_adjust_tile_offset(); for linear buffers it is
 * plain byte arithmetic on the pitch.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        WARN_ON(new_offset > old_offset);

        if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* Rotated view: pitch is in tile rows, and the
                         * tile dimensions are swapped. */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* Linear: convert the full byte offset back to x/y. */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2295
2296 /*
2297  * Adjust the tile offset by moving the difference into
2298  * the x/y offsets.
2299  */
2300 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2301                                              const struct intel_plane_state *state,
2302                                              int color_plane,
2303                                              u32 old_offset, u32 new_offset)
2304 {
2305         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2306                                            state->base.rotation,
2307                                            state->color_plane[color_plane].stride,
2308                                            old_offset, new_offset);
2309 }
2310
2311 /*
2312  * Computes the aligned offset to the base tile and adjusts
2313  * x, y. bytes per pixel is assumed to be a power-of-two.
2314  *
2315  * In the 90/270 rotated case, x and y are assumed
2316  * to be already rotated to match the rotated GTT view, and
2317  * pitch is the tile_height aligned framebuffer height.
2318  *
2319  * This function is used when computing the derived information
2320  * under intel_framebuffer, so using any of that information
2321  * here is not allowed. Anything under drm_framebuffer can be
2322  * used. This is why the user has to pass in the pitch since it
2323  * is specified in the rotated orientation.
2324  */
2325 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2326                                         int *x, int *y,
2327                                         const struct drm_framebuffer *fb,
2328                                         int color_plane,
2329                                         unsigned int pitch,
2330                                         unsigned int rotation,
2331                                         u32 alignment)
2332 {
2333         uint64_t fb_modifier = fb->modifier;
2334         unsigned int cpp = fb->format->cpp[color_plane];
2335         u32 offset, offset_aligned;
2336
2337         if (alignment)
2338                 alignment--;
2339
2340         if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
2341                 unsigned int tile_size, tile_width, tile_height;
2342                 unsigned int tile_rows, tiles, pitch_tiles;
2343
2344                 tile_size = intel_tile_size(dev_priv);
2345                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2346
2347                 if (drm_rotation_90_or_270(rotation)) {
2348                         pitch_tiles = pitch / tile_height;
2349                         swap(tile_width, tile_height);
2350                 } else {
2351                         pitch_tiles = pitch / (tile_width * cpp);
2352                 }
2353
2354                 tile_rows = *y / tile_height;
2355                 *y %= tile_height;
2356
2357                 tiles = *x / tile_width;
2358                 *x %= tile_width;
2359
2360                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2361                 offset_aligned = offset & ~alignment;
2362
2363                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2364                                          tile_size, pitch_tiles,
2365                                          offset, offset_aligned);
2366         } else {
2367                 offset = *y * pitch + *x * cpp;
2368                 offset_aligned = offset & ~alignment;
2369
2370                 *y = (offset & alignment) / pitch;
2371                 *x = ((offset & alignment) - *y * pitch) / cpp;
2372         }
2373
2374         return offset_aligned;
2375 }
2376
2377 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2378                                               const struct intel_plane_state *state,
2379                                               int color_plane)
2380 {
2381         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2382         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2383         const struct drm_framebuffer *fb = state->base.fb;
2384         unsigned int rotation = state->base.rotation;
2385         int pitch = state->color_plane[color_plane].stride;
2386         u32 alignment;
2387
2388         if (intel_plane->id == PLANE_CURSOR)
2389                 alignment = intel_cursor_alignment(dev_priv);
2390         else
2391                 alignment = intel_surf_alignment(fb, color_plane);
2392
2393         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2394                                             pitch, rotation, alignment);
2395 }
2396
2397 /* Convert the fb->offset[] into x/y offsets */
2398 static int intel_fb_offset_to_xy(int *x, int *y,
2399                                  const struct drm_framebuffer *fb,
2400                                  int color_plane)
2401 {
2402         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2403
2404         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2405             fb->offsets[color_plane] % intel_tile_size(dev_priv))
2406                 return -EINVAL;
2407
2408         *x = 0;
2409         *y = 0;
2410
2411         intel_adjust_aligned_offset(x, y,
2412                                     fb, color_plane, DRM_MODE_ROTATE_0,
2413                                     fb->pitches[color_plane],
2414                                     fb->offsets[color_plane], 0);
2415
2416         return 0;
2417 }
2418
2419 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2420 {
2421         switch (fb_modifier) {
2422         case I915_FORMAT_MOD_X_TILED:
2423                 return I915_TILING_X;
2424         case I915_FORMAT_MOD_Y_TILED:
2425         case I915_FORMAT_MOD_Y_TILED_CCS:
2426                 return I915_TILING_Y;
2427         default:
2428                 return I915_TILING_NONE;
2429         }
2430 }
2431
2432 /*
2433  * From the Sky Lake PRM:
2434  * "The Color Control Surface (CCS) contains the compression status of
2435  *  the cache-line pairs. The compression state of the cache-line pair
2436  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2437  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2438  *  cache-line-pairs. CCS is always Y tiled."
2439  *
2440  * Since cache line pairs refers to horizontally adjacent cache lines,
2441  * each cache line in the CCS corresponds to an area of 32x16 cache
2442  * lines on the main surface. Since each pixel is 4 bytes, this gives
2443  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2444  * main surface.
2445  */
2446 static const struct drm_format_info ccs_formats[] = {
2447         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2448         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2449         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2450         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2451 };
2452
2453 static const struct drm_format_info *
2454 lookup_format_info(const struct drm_format_info formats[],
2455                    int num_formats, u32 format)
2456 {
2457         int i;
2458
2459         for (i = 0; i < num_formats; i++) {
2460                 if (formats[i].format == format)
2461                         return &formats[i];
2462         }
2463
2464         return NULL;
2465 }
2466
2467 static const struct drm_format_info *
2468 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2469 {
2470         switch (cmd->modifier[0]) {
2471         case I915_FORMAT_MOD_Y_TILED_CCS:
2472         case I915_FORMAT_MOD_Yf_TILED_CCS:
2473                 return lookup_format_info(ccs_formats,
2474                                           ARRAY_SIZE(ccs_formats),
2475                                           cmd->pixel_format);
2476         default:
2477                 return NULL;
2478         }
2479 }
2480
2481 bool is_ccs_modifier(u64 modifier)
2482 {
2483         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2484                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 }
2486
/*
 * Validate the framebuffer layout and fill in the derived per-plane
 * information under intel_framebuffer: the x/y of the first pixel in
 * the normal GTT view (intel_fb->normal[]), and for tiled fbs also the
 * remapped rotated view (intel_fb->rotated[] / rot_info). Finally
 * checks that the whole layout fits inside the backing object.
 *
 * Returns 0 on success or a negative error code on a bad layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;  /* running tile count in the rotated view */
        unsigned int max_size = 0;   /* in tiles */
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        /* Scale the CCS tile dims to main surface pixels. */
                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                /* NOTE(review): this reuses the "bad fb plane offset" message
                 * for a stride-wrap failure, which can be misleading in logs. */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-aligned base of this plane; x/y absorb the remainder. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;

                if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: just count the tiles covered by the plane. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        /* Reject fbs that do not fit inside the backing object. */
        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2651
2652 static int i9xx_format_to_fourcc(int format)
2653 {
2654         switch (format) {
2655         case DISPPLANE_8BPP:
2656                 return DRM_FORMAT_C8;
2657         case DISPPLANE_BGRX555:
2658                 return DRM_FORMAT_XRGB1555;
2659         case DISPPLANE_BGRX565:
2660                 return DRM_FORMAT_RGB565;
2661         default:
2662         case DISPPLANE_BGRX888:
2663                 return DRM_FORMAT_XRGB8888;
2664         case DISPPLANE_RGBX888:
2665                 return DRM_FORMAT_XBGR8888;
2666         case DISPPLANE_BGRX101010:
2667                 return DRM_FORMAT_XRGB2101010;
2668         case DISPPLANE_RGBX101010:
2669                 return DRM_FORMAT_XBGR2101010;
2670         }
2671 }
2672
2673 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2674 {
2675         switch (format) {
2676         case PLANE_CTL_FORMAT_RGB_565:
2677                 return DRM_FORMAT_RGB565;
2678         case PLANE_CTL_FORMAT_NV12:
2679                 return DRM_FORMAT_NV12;
2680         default:
2681         case PLANE_CTL_FORMAT_XRGB_8888:
2682                 if (rgb_order) {
2683                         if (alpha)
2684                                 return DRM_FORMAT_ABGR8888;
2685                         else
2686                                 return DRM_FORMAT_XBGR8888;
2687                 } else {
2688                         if (alpha)
2689                                 return DRM_FORMAT_ARGB8888;
2690                         else
2691                                 return DRM_FORMAT_XRGB8888;
2692                 }
2693         case PLANE_CTL_FORMAT_XRGB_2101010:
2694                 if (rgb_order)
2695                         return DRM_FORMAT_XBGR2101010;
2696                 else
2697                         return DRM_FORMAT_XRGB2101010;
2698         }
2699 }
2700
/*
 * Try to wrap the BIOS/firmware framebuffer described by @plane_config
 * in a GEM object backed by its preallocated stolen-memory range, so
 * the boot image can be taken over. Returns true on success.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Page-align the range [base, base + size) reported by the BIOS. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        mutex_unlock(&dev->struct_mutex);
        if (!obj)
                return false;

        /* Carry the BIOS X-tiling choice over to the new object. */
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        i915_gem_object_put(obj);
        return false;
}
2757
2758 static void
2759 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2760                         struct intel_plane_state *plane_state,
2761                         bool visible)
2762 {
2763         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2764
2765         plane_state->base.visible = visible;
2766
2767         if (visible)
2768                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2769         else
2770                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2771 }
2772
/*
 * Rebuild crtc_state->active_planes from the (unique-id based)
 * plane_mask, discarding whatever aliased value was read out.
 */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->base.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2789
/*
 * Disable @plane on @crtc outside of a full atomic commit, updating the
 * software state (visibility, plane masks) before touching the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        /* Software state first, so the masks reflect the disable. */
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);

        /* Primary plane disable needs extra work (e.g. FBC teardown). */
        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        trace_intel_disable_plane(&plane->base, crtc);
        plane->disable_plane(plane, crtc);
}
2811
/*
 * Take over the firmware framebuffer for @intel_crtc's primary plane:
 * either wrap its stolen-memory backing in a new object, or share an fb
 * already reconstructed for another CRTC at the same GTT base. If
 * neither works the primary plane is disabled instead of being left
 * pointing at a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GTT base => same BIOS fb; share it. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->base.rotation);

        /* Pin the fb so the takeover survives until the first real flip. */
        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        obj = intel_fb_obj(fb);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        /* Populate the plane state as a full-screen scanout of the fb. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        /* Keep the BIOS swizzle setup if it was scanning out tiled memory. */
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2919
2920 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2921                                int color_plane,
2922                                unsigned int rotation)
2923 {
2924         int cpp = fb->format->cpp[color_plane];
2925
2926         switch (fb->modifier) {
2927         case DRM_FORMAT_MOD_LINEAR:
2928         case I915_FORMAT_MOD_X_TILED:
2929                 switch (cpp) {
2930                 case 8:
2931                         return 4096;
2932                 case 4:
2933                 case 2:
2934                 case 1:
2935                         return 8192;
2936                 default:
2937                         MISSING_CASE(cpp);
2938                         break;
2939                 }
2940                 break;
2941         case I915_FORMAT_MOD_Y_TILED_CCS:
2942         case I915_FORMAT_MOD_Yf_TILED_CCS:
2943                 /* FIXME AUX plane? */
2944         case I915_FORMAT_MOD_Y_TILED:
2945         case I915_FORMAT_MOD_Yf_TILED:
2946                 switch (cpp) {
2947                 case 8:
2948                         return 2048;
2949                 case 4:
2950                         return 4096;
2951                 case 2:
2952                 case 1:
2953                         return 8192;
2954                 default:
2955                         MISSING_CASE(cpp);
2956                         break;
2957                 }
2958                 break;
2959         default:
2960                 MISSING_CASE(fb->modifier);
2961         }
2962
2963         return 2048;
2964 }
2965
/*
 * Walk the AUX (CCS) surface offset backwards one alignment step at a
 * time until its x/y (in main surface coordinates) matches the main
 * surface's @main_x/@main_y. On success, updates the AUX plane's
 * offset/x/y in @plane_state and returns true; returns false if no
 * matching position exists.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* The AUX offset must stay at or above the main surface offset. */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                /* Found a matching intra-tile position. */
                if (aux_x == main_x && aux_y == main_y)
                        break;

                /* Cannot walk back past the start of the object. */
                if (aux_offset == 0)
                        break;

                /* Step back one alignment unit, in AUX plane coordinates. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                /* Convert back to main surface coordinates, keeping the sub-sample remainder. */
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3003
/*
 * Validate and finalize the main surface layout for a SKL+ plane:
 * checks the source size limits, computes the aligned surface offset,
 * and massages the offset/x/y so the X-tiling stride and the CCS AUX
 * surface constraints are satisfied. Fills color_plane[0] on success.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Walk the offset back until x + w fits inside the stride. */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        /* Try again with the offset walked back one alignment unit. */
                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        return 0;
}
3080
3081 static int
3082 skl_check_nv12_surface(struct intel_plane_state *plane_state)
3083 {
3084         /* Display WA #1106 */
3085         if (plane_state->base.rotation !=
3086             (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3087             plane_state->base.rotation != DRM_MODE_ROTATE_270)
3088                 return 0;
3089
3090         /*
3091          * src coordinates are rotated here.
3092          * We check height but report it as width
3093          */
3094         if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3095                 DRM_DEBUG_KMS("src width must be multiple "
3096                               "of 4 for rotated NV12\n");
3097                 return -EINVAL;
3098         }
3099
3100         return 0;
3101 }
3102
/*
 * Validate and finalize the CbCr (AUX) surface of an NV12 framebuffer.
 *
 * The src rectangle is in 16.16 fixed point, and NV12 chroma is
 * subsampled 2x2, so ">> 17" is shorthand for (>> 16) / 2.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        /* 16.16 fixed point halved for the 2x2 subsampled chroma plane */
        int x = plane_state->base.src.x1 >> 17;
        int y = plane_state->base.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->base.src) >> 17;
        int h = drm_rect_height(&plane_state->base.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x;
        plane_state->color_plane[1].y = y;

        return 0;
}
3131
/*
 * Set up color_plane[1] for a render-compressed (CCS) framebuffer.
 * The AUX surface coordinates are the main surface coordinates scaled
 * down by the format's hsub/vsub factors; the modulo terms below
 * re-add the remainder lost to the integer division.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int src_x = plane_state->base.src.x1 >> 16; /* 16.16 fixed point -> int */
        int src_y = plane_state->base.src.y1 >> 16;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int x = src_x / hsub;
        int y = src_y / vsub;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        plane_state->color_plane[1].offset = offset;
        /* restore the sub-sample remainder dropped by the division above */
        plane_state->color_plane[1].x = x * hsub + src_x % hsub;
        plane_state->color_plane[1].y = y * vsub + src_y % vsub;

        return 0;
}
3152
/*
 * Compute the GGTT view, per-plane strides, aligned offsets and x/y
 * coordinates for a SKL+ plane. Ordering matters: the AUX surface is
 * set up before the main surface because skl_check_main_surface()
 * consults the AUX offset when placing the main surface.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int ret;

        intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
        plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
        plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

        ret = intel_plane_check_stride(plane_state);
        if (ret)
                return ret;

        /* HW only has 8 bits pixel precision, disable plane if invisible */
        if (!(plane_state->base.alpha >> 8))
                plane_state->base.visible = false;

        /* Nothing else to compute for an invisible plane. */
        if (!plane_state->base.visible)
                return 0;

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width << 16, fb->height << 16,
                                DRM_MODE_ROTATE_270);

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (fb->format->format == DRM_FORMAT_NV12) {
                ret = skl_check_nv12_surface(plane_state);
                if (ret)
                        return ret;
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface for this format/modifier; park the
                 * offset at an out-of-range value (presumably to catch
                 * accidental use — TODO confirm intent of ~0xfff).
                 */
                plane_state->color_plane[1].offset = ~0xfff;
                plane_state->color_plane[1].x = 0;
                plane_state->color_plane[1].y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3207
3208 unsigned int
3209 i9xx_plane_max_stride(struct intel_plane *plane,
3210                       u32 pixel_format, u64 modifier,
3211                       unsigned int rotation)
3212 {
3213         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3214
3215         if (!HAS_GMCH_DISPLAY(dev_priv)) {
3216                 return 32*1024;
3217         } else if (INTEL_GEN(dev_priv) >= 4) {
3218                 if (modifier == I915_FORMAT_MOD_X_TILED)
3219                         return 16*1024;
3220                 else
3221                         return 32*1024;
3222         } else if (INTEL_GEN(dev_priv) >= 3) {
3223                 if (modifier == I915_FORMAT_MOD_X_TILED)
3224                         return 8*1024;
3225                 else
3226                         return 16*1024;
3227         } else {
3228                 if (plane->i9xx_plane == PLANE_C)
3229                         return 4*1024;
3230                 else
3231                         return 8*1024;
3232         }
3233 }
3234
/*
 * Build the DSPCNTR value for a pre-SKL primary plane from the
 * committed crtc/plane state. Returns 0 for an unsupported pixel
 * format (after a MISSING_CASE warning).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

        if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
            IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

        /* Pre-gen5 planes can be assigned to either pipe. */
        if (INTEL_GEN(dev_priv) < 5)
                dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

        /* Translate the DRM fourcc into the DSPCNTR format field. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
                MISSING_CASE(fb->format->format);
                return 0;
        }

        /* X-tiled scanout is only a thing on gen4+ here. */
        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
3296
/*
 * Compute the GGTT view, stride, aligned surface offset and final x/y
 * scanout coordinates for a pre-SKL primary plane.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int src_x = plane_state->base.src.x1 >> 16; /* 16.16 fixed point -> int */
        int src_y = plane_state->base.src.y1 >> 16;
        u32 offset;
        int ret;

        intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
        plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

        ret = intel_plane_check_stride(plane_state);
        if (ret)
                return ret;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

        /* Pre-gen4 has no surface offset register, scanout is linear. */
        if (INTEL_GEN(dev_priv) >= 4)
                offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                            plane_state, 0);
        else
                offset = 0;

        /* HSW/BDW do this automagically in hardware */
        if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
                int src_w = drm_rect_width(&plane_state->base.src) >> 16;
                int src_h = drm_rect_height(&plane_state->base.src) >> 16;

                /*
                 * For rotated/reflected scanout, point the base at the
                 * last pixel of the mirrored axis.
                 */
                if (rotation & DRM_MODE_ROTATE_180) {
                        src_x += src_w - 1;
                        src_y += src_h - 1;
                } else if (rotation & DRM_MODE_REFLECT_X) {
                        src_x += src_w - 1;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
3342
/*
 * Atomic ->check hook for pre-SKL primary planes: validate rotation
 * restrictions (see chv_plane_check_rotation()), clip the plane
 * against the crtc (no scaling supported), validate src coordinates
 * and surface layout, then pre-compute the DSPCNTR value for commit.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
                 struct intel_plane_state *plane_state)
{
        int ret;

        ret = chv_plane_check_rotation(plane_state);
        if (ret)
                return ret;

        ret = drm_atomic_helper_check_plane_state(&plane_state->base,
                                                  &crtc_state->base,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  false, true);
        if (ret)
                return ret;

        /* Fully clipped plane: nothing more to compute. */
        if (!plane_state->base.visible)
                return 0;

        ret = intel_plane_check_src_coordinates(plane_state);
        if (ret)
                return ret;

        ret = i9xx_check_plane_surface(plane_state);
        if (ret)
                return ret;

        plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

        return 0;
}
3376
/*
 * Program a pre-SKL primary plane from the pre-computed plane state.
 * All register writes happen under uncore.lock using the _FW
 * (no-forcewake-check) accessors to keep the critical section short.
 */
static void i9xx_update_plane(struct intel_plane *plane,
                              const struct intel_crtc_state *crtc_state,
                              const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 linear_offset;
        u32 dspcntr = plane_state->ctl;
        i915_reg_t reg = DSPCNTR(i9xx_plane);
        int x = plane_state->color_plane[0].x;
        int y = plane_state->color_plane[0].y;
        unsigned long irqflags;
        u32 dspaddr_offset;

        linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

        /* gen4+ uses the aligned surface offset; gen2/3 scan out linearly */
        if (INTEL_GEN(dev_priv) >= 4)
                dspaddr_offset = plane_state->color_plane[0].offset;
        else
                dspaddr_offset = linear_offset;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        if (INTEL_GEN(dev_priv) < 4) {
                /* pipesrc and dspsize control the size that is scaled from,
                 * which should always be the user's requested size.
                 */
                I915_WRITE_FW(DSPSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
                I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
        } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
                I915_WRITE_FW(PRIMSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
                I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
                I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
        }

        I915_WRITE_FW(reg, dspcntr);

        I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* HSW/BDW pan via DSPOFFSET; no linear offset register */
                I915_WRITE_FW(DSPSURF(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
                I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE_FW(DSPSURF(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
                I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
                I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
        } else {
                I915_WRITE_FW(DSPADDR(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
        }
        POSTING_READ_FW(reg);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3439
/*
 * Disable a pre-SKL primary plane: clear DSPCNTR, then write the
 * surface/address register (DSPSURF on gen4+, DSPADDR before that)
 * and do a posting read, all under uncore.lock.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
                               struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
        if (INTEL_GEN(dev_priv) >= 4)
                I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
        else
                I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
        POSTING_READ_FW(DSPCNTR(i9xx_plane));

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3458
/*
 * Read back whether the plane is currently enabled in hardware and
 * which pipe it is attached to. Takes a display power reference for
 * the duration of the register read; returns false if the power well
 * is off (plane can't be enabled then).
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
                                    enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-4 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        val = I915_READ(DSPCNTR(i9xx_plane));

        ret = val & DISPLAY_PLANE_ENABLE;

        /* Pre-gen5 the pipe assignment is encoded in DSPCNTR itself. */
        if (INTEL_GEN(dev_priv) >= 5)
                *pipe = plane->pipe;
        else
                *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;

        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
3491
3492 static u32
3493 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3494 {
3495         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3496                 return 64;
3497         else
3498                 return intel_tile_width_bytes(fb, color_plane);
3499 }
3500
3501 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3502 {
3503         struct drm_device *dev = intel_crtc->base.dev;
3504         struct drm_i915_private *dev_priv = to_i915(dev);
3505
3506         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3507         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3508         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3509 }
3510
3511 /*
3512  * This function detaches (aka. unbinds) unused scalers in hardware
3513  */
3514 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3515 {
3516         struct intel_crtc_scaler_state *scaler_state;
3517         int i;
3518
3519         scaler_state = &intel_crtc->config->scaler_state;
3520
3521         /* loop through and disable scalers that aren't in use */
3522         for (i = 0; i < intel_crtc->num_scalers; i++) {
3523                 if (!scaler_state->scalers[i].in_use)
3524                         skl_detach_scaler(intel_crtc, i);
3525         }
3526 }
3527
3528 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3529                      int color_plane)
3530 {
3531         const struct drm_framebuffer *fb = plane_state->base.fb;
3532         unsigned int rotation = plane_state->base.rotation;
3533         u32 stride = plane_state->color_plane[color_plane].stride;
3534
3535         if (color_plane >= fb->format->num_planes)
3536                 return 0;
3537
3538         /*
3539          * The stride is either expressed as a multiple of 64 bytes chunks for
3540          * linear buffers or in number of tiles for tiled buffers.
3541          */
3542         if (drm_rotation_90_or_270(rotation))
3543                 stride /= intel_tile_height(fb, color_plane);
3544         else
3545                 stride /= intel_fb_stride_alignment(fb, color_plane);
3546
3547         return stride;
3548 }
3549
/*
 * Translate a DRM fourcc into the PLANE_CTL source pixel format bits.
 * BGRX channel order is the hardware default; the XBGR/ABGR variants
 * additionally set PLANE_CTL_ORDER_RGBX. Returns 0 for unsupported
 * formats (after a MISSING_CASE warning).
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
        switch (pixel_format) {
        case DRM_FORMAT_C8:
                return PLANE_CTL_FORMAT_INDEXED;
        case DRM_FORMAT_RGB565:
                return PLANE_CTL_FORMAT_RGB_565;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                return PLANE_CTL_FORMAT_XRGB_8888;
        case DRM_FORMAT_XRGB2101010:
                return PLANE_CTL_FORMAT_XRGB_2101010;
        case DRM_FORMAT_XBGR2101010:
                return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
        case DRM_FORMAT_YUYV:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
        case DRM_FORMAT_YVYU:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
        case DRM_FORMAT_UYVY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
        case DRM_FORMAT_VYUY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
        case DRM_FORMAT_NV12:
                return PLANE_CTL_FORMAT_NV12;
        default:
                MISSING_CASE(pixel_format);
        }

        return 0;
}
3583
3584 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3585 {
3586         if (!plane_state->base.fb->format->has_alpha)
3587                 return PLANE_CTL_ALPHA_DISABLE;
3588
3589         switch (plane_state->base.pixel_blend_mode) {
3590         case DRM_MODE_BLEND_PIXEL_NONE:
3591                 return PLANE_CTL_ALPHA_DISABLE;
3592         case DRM_MODE_BLEND_PREMULTI:
3593                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3594         case DRM_MODE_BLEND_COVERAGE:
3595                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3596         default:
3597                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3598                 return PLANE_CTL_ALPHA_DISABLE;
3599         }
3600 }
3601
3602 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3603 {
3604         if (!plane_state->base.fb->format->has_alpha)
3605                 return PLANE_COLOR_ALPHA_DISABLE;
3606
3607         switch (plane_state->base.pixel_blend_mode) {
3608         case DRM_MODE_BLEND_PIXEL_NONE:
3609                 return PLANE_COLOR_ALPHA_DISABLE;
3610         case DRM_MODE_BLEND_PREMULTI:
3611                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3612         case DRM_MODE_BLEND_COVERAGE:
3613                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3614         default:
3615                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3616                 return PLANE_COLOR_ALPHA_DISABLE;
3617         }
3618 }
3619
/*
 * Translate a framebuffer modifier into PLANE_CTL tiling bits. The
 * CCS modifiers additionally enable render decompression. Unknown
 * modifiers warn via MISSING_CASE and fall back to 0 (linear).
 */
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
        switch (fb_modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                break;
        case I915_FORMAT_MOD_X_TILED:
                return PLANE_CTL_TILED_X;
        case I915_FORMAT_MOD_Y_TILED:
                return PLANE_CTL_TILED_Y;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        case I915_FORMAT_MOD_Yf_TILED:
                return PLANE_CTL_TILED_YF;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
        default:
                MISSING_CASE(fb_modifier);
        }

        return 0;
}
3641
/*
 * Translate a DRM rotation (already masked to DRM_MODE_ROTATE_MASK)
 * into PLANE_CTL rotation bits. Unknown values warn via MISSING_CASE
 * and fall back to 0 (no rotation).
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
        switch (rotate) {
        case DRM_MODE_ROTATE_0:
                break;
        /*
         * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
         * while i915 HW rotation is clockwise, thats why this swapping.
         */
        case DRM_MODE_ROTATE_90:
                return PLANE_CTL_ROTATE_270;
        case DRM_MODE_ROTATE_180:
                return PLANE_CTL_ROTATE_180;
        case DRM_MODE_ROTATE_270:
                return PLANE_CTL_ROTATE_90;
        default:
                MISSING_CASE(rotate);
        }

        return 0;
}
3663
3664 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3665 {
3666         switch (reflect) {
3667         case 0:
3668                 break;
3669         case DRM_MODE_REFLECT_X:
3670                 return PLANE_CTL_FLIP_HORIZONTAL;
3671         case DRM_MODE_REFLECT_Y:
3672         default:
3673                 MISSING_CASE(reflect);
3674         }
3675
3676         return 0;
3677 }
3678
/*
 * Build the PLANE_CTL value for a SKL+ universal plane from the
 * committed crtc/plane state.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
                  const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 plane_ctl;

        plane_ctl = PLANE_CTL_ENABLE;

        /*
         * On GLK and gen10+ these alpha/gamma/CSC controls live in
         * PLANE_COLOR_CTL instead (see glk_plane_color_ctl()).
         */
        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
                plane_ctl |= skl_plane_ctl_alpha(plane_state);
                plane_ctl |=
                        PLANE_CTL_PIPE_GAMMA_ENABLE |
                        PLANE_CTL_PIPE_CSC_ENABLE |
                        PLANE_CTL_PLANE_GAMMA_DISABLE;

                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
        }

        plane_ctl |= skl_plane_ctl_format(fb->format->format);
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
        plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

        /* Horizontal flip is only supported on gen10+ */
        if (INTEL_GEN(dev_priv) >= 10)
                plane_ctl |= cnl_plane_ctl_flip(rotation &
                                                DRM_MODE_REFLECT_MASK);

        if (key->flags & I915_SET_COLORKEY_DESTINATION)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
        else if (key->flags & I915_SET_COLORKEY_SOURCE)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

        return plane_ctl;
}
3720
/*
 * Build the PLANE_COLOR_CTL value (GLK and gen10+) from the committed
 * crtc/plane state: alpha blending mode, gamma/CSC enables and the
 * YUV->RGB conversion setup for YUV framebuffers.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                        const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        u32 plane_color_ctl = 0;

        /* These enables are gone on gen11+ — presumably handled
         * elsewhere there; TODO confirm. */
        if (INTEL_GEN(dev_priv) < 11) {
                plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
                plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
        }
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
        plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

        if (fb->format->is_yuv) {
                /* BT.709 vs BT.601 encoding; output is always RGB709 */
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
        }

        return plane_color_ctl;
}
3748
/*
 * Re-commit an atomic state that was duplicated before a GPU/display
 * reset (see intel_prepare_reset()). Re-reads the hardware state
 * first, then forces ->mode_changed on every crtc so a full modeset
 * recalculation happens even if the restored state looks compatible.
 */
static int
__intel_display_resume(struct drm_device *dev,
                       struct drm_atomic_state *state,
                       struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        intel_modeset_setup_hw_state(dev, ctx);
        i915_redisable_vga(to_i915(dev));

        /* Nothing to restore (reset didn't clobber a duplicated state) */
        if (!state)
                return 0;

        /*
         * We've duplicated the state, pointers to the old state are invalid.
         *
         * Don't attempt to use the old state until we commit the duplicated state.
         */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /*
                 * Force recalculation even if we restore
                 * current state. With fast modeset this may not result
                 * in a modeset when the state is compatible.
                 */
                crtc_state->mode_changed = true;
        }

        /* ignore any reset values/BIOS leftovers in the WM registers */
        if (!HAS_GMCH_DISPLAY(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

        /* Caller holds all modeset locks already, so -EDEADLK is unexpected */
        WARN_ON(ret == -EDEADLK);
        return ret;
}
3787
3788 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3789 {
3790         return intel_has_gpu_reset(dev_priv) &&
3791                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3792 }
3793
/*
 * Called before a GPU reset. If the reset will clobber the display
 * (or the force_reset_modeset_test modparam is set), grab all modeset
 * locks, duplicate the current atomic state for restoration by
 * intel_finish_reset(), and gracefully disable all crtcs. The locks
 * and I915_RESET_MODESET bit remain held/set across the reset.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        /* reset doesn't touch the display */
        if (!i915_modparams.force_reset_modeset_test &&
            !gpu_reset_clobbers_display(dev_priv))
                return;

        /* We have a modeset vs reset deadlock, defensively unbreak it. */
        set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
        wake_up_all(&dev_priv->gpu_error.wait_queue);

        if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
                DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
                i915_gem_set_wedged(dev_priv);
        }

        /*
         * Need mode_config.mutex so that we don't
         * trample ongoing ->detect() and whatnot.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(ctx, 0);
        /* Retry-on-deadlock loop to acquire every modeset lock. */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(ctx);
        }
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
        state = drm_atomic_helper_duplicate_state(dev, ctx);
        if (IS_ERR(state)) {
                ret = PTR_ERR(state);
                DRM_ERROR("Duplicating state failed with %i\n", ret);
                return;
        }

        ret = drm_atomic_helper_disable_all(dev, ctx);
        if (ret) {
                DRM_ERROR("Suspending crtc's failed with %i\n", ret);
                drm_atomic_state_put(state);
                return;
        }

        /* Stashed for intel_finish_reset() to restore after the reset. */
        dev_priv->modeset_restore_state = state;
        state->acquire_ctx = ctx;
}
3849
/*
 * Undo the work done before a GPU reset: restore the display state that
 * was saved in dev_priv->modeset_restore_state, re-initialize the display
 * hardware first if the reset clobbered it, and drop the modeset
 * locks/acquire context taken by the reset-preparation path above.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Claim the saved state; NULL means nothing was duplicated earlier. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupt setup under the IRQ lock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks/context acquired before the reset. */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3903
/*
 * Fastset helper: update the pipe source size and panel-fitter state
 * without a full modeset.  Needs both old and new crtc states so the
 * PCH pfit can be disabled when it was enabled before but no longer is.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC takes (width - 1) in the high word, (height - 1) in the low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}
}
3939
/*
 * Switch the FDI TX/RX out of link-training patterns into normal
 * (idle/active) operation once training has completed.  IVB and CPT
 * PCHs use different register fields for the pattern selection.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a wider pattern field; clear it before setting NONE. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3980
/* The FDI link training functions for ILK/Ibexpeak. */

/*
 * Train the CPU-to-PCH FDI link on ILK/Ibexpeak: enable TX/RX with
 * training pattern 1, poll FDI_RX_IIR for bit lock, then switch both
 * sides to pattern 2 and poll for symbol lock.  Failures are only
 * logged; the modeset proceeds regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll (up to 5 reads) for bit lock, acking it by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock and ack it the same way. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4074
/*
 * Voltage-swing / pre-emphasis levels stepped through, in order, by the
 * SNB and IVB FDI link-training loops below until the RX reports lock.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4081
/* The FDI link training functions for SNB/Cougarpoint. */

/*
 * Train the FDI link on SNB/Cougarpoint.  Unlike ILK, each training
 * pattern is retried across the four voltage-swing/pre-emphasis levels
 * in snb_b_fdi_train_param[] until the RX reports lock.  Failures are
 * only logged.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep the vswing/pre-emphasis levels until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same level sweep as above, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4214
/* Manual link training for Ivy Bridge A0 parts */

/*
 * Manually train the FDI link on IVB A0 (which cannot use FDI auto-train).
 * Each voltage-swing/pre-emphasis setting is tried twice; for every
 * attempt the link is disabled, re-enabled with training pattern 1,
 * polled for bit lock, then switched to pattern 2 and polled for symbol
 * lock.  On success the function jumps straight to train_done.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; re-read once in case it just arrived. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock the same way. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4334
/*
 * Enable the FDI PLLs for this pipe: first the PCH FDI RX PLL (with lane
 * count and BPC copied from PIPECONF), then switch RX to PCDclk, and
 * finally the CPU FDI TX PLL if it is not already running.  The udelays
 * between steps are part of the required hardware sequence.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Mirror the pipe's BPC setting into the FDI RX control field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4371
/*
 * Disable the FDI PLLs in the reverse order of ironlake_fdi_pll_enable():
 * switch RX back to Rawclk, turn off the CPU TX PLL, then the PCH RX PLL,
 * with the mandated delays for the clocks to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4401
/*
 * Disable the FDI link for this crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the Ibexpeak clock-pointer workaround, and leave both sides
 * parked in training pattern 1 with the RX BPC field kept consistent
 * with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4454
4455 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4456 {
4457         struct drm_crtc *crtc;
4458         bool cleanup_done;
4459
4460         drm_for_each_crtc(crtc, &dev_priv->drm) {
4461                 struct drm_crtc_commit *commit;
4462                 spin_lock(&crtc->commit_lock);
4463                 commit = list_first_entry_or_null(&crtc->commit_list,
4464                                                   struct drm_crtc_commit, commit_entry);
4465                 cleanup_done = commit ?
4466                         try_wait_for_completion(&commit->cleanup_done) : true;
4467                 spin_unlock(&crtc->commit_lock);
4468
4469                 if (cleanup_done)
4470                         continue;
4471
4472                 drm_crtc_wait_one_vblank(crtc);
4473
4474                 return true;
4475         }
4476
4477         return false;
4478 }
4479
/*
 * Stop the LPT iCLKIP clock: gate the pixel clock first, then set the
 * SSC control disable bit through the sideband interface (under sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4494
/* Program iCLKIP clock to the desired frequency */

/*
 * Derive the iCLKIP divider settings (auxdiv, divsel, phaseinc) from the
 * crtc's pixel clock, program them through the sideband interface, and
 * ungate the pixel clock.  Assumes the modulator is currently disabled
 * (lpt_disable_iclkip() is called first).
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	/* Pick the smallest auxdiv that keeps divsel within its 7 bits. */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4572
/*
 * Read back the currently programmed iCLKIP frequency (in KHz) by
 * inverting the divisor math in lpt_program_iclkip().  Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the divider fields programmed by lpt_program_iclkip(). */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4609
/*
 * Copy the CPU transcoder's timing registers (H/V total, blank, sync,
 * and vsync shift) to the given PCH transcoder so both sides of the FDI
 * link run with identical timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4633
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit.  No-op when
 * the bit already matches; warns if either FDI B or C RX is still
 * enabled, since the bit must not be flipped while they are active.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4654
4655 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4656 {
4657         struct drm_device *dev = intel_crtc->base.dev;
4658
4659         switch (intel_crtc->pipe) {
4660         case PIPE_A:
4661                 break;
4662         case PIPE_B:
4663                 if (intel_crtc->config->fdi_lanes > 2)
4664                         cpt_set_fdi_bc_bifurcation(dev, false);
4665                 else
4666                         cpt_set_fdi_bc_bifurcation(dev, true);
4667
4668                 break;
4669         case PIPE_C:
4670                 cpt_set_fdi_bc_bifurcation(dev, true);
4671
4672                 break;
4673         default:
4674                 BUG();
4675         }
4676 }
4677
4678 /*
4679  * Finds the encoder associated with the given CRTC. This can only be
4680  * used when we know that the CRTC isn't feeding multiple encoders!
4681  */
4682 static struct intel_encoder *
4683 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4684                            const struct intel_crtc_state *crtc_state)
4685 {
4686         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4687         const struct drm_connector_state *connector_state;
4688         const struct drm_connector *connector;
4689         struct intel_encoder *encoder = NULL;
4690         int num_encoders = 0;
4691         int i;
4692
4693         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4694                 if (connector_state->crtc != &crtc->base)
4695                         continue;
4696
4697                 encoder = to_intel_encoder(connector_state->best_encoder);
4698                 num_encoders++;
4699         }
4700
4701         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4702              num_encoders, pipe_name(crtc->pipe));
4703
4704         return encoder;
4705 }
4706
4707 /*
4708  * Enable PCH resources required for PCH ports:
4709  *   - PCH PLLs
4710  *   - FDI training & RX/TX
4711  *   - update transcoder timings
4712  *   - DP transcoding bits
4713  *   - transcoder
4714  */
4715 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4716                                 const struct intel_crtc_state *crtc_state)
4717 {
4718         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4719         struct drm_device *dev = crtc->base.dev;
4720         struct drm_i915_private *dev_priv = to_i915(dev);
4721         int pipe = crtc->pipe;
4722         u32 temp;
4723
4724         assert_pch_transcoder_disabled(dev_priv, pipe);
4725
4726         if (IS_IVYBRIDGE(dev_priv))
4727                 ivybridge_update_fdi_bc_bifurcation(crtc);
4728
4729         /* Write the TU size bits before fdi link training, so that error
4730          * detection works. */
4731         I915_WRITE(FDI_RX_TUSIZE1(pipe),
4732                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4733
4734         /* For PCH output, training FDI link */
4735         dev_priv->display.fdi_link_train(crtc, crtc_state);
4736
4737         /* We need to program the right clock selection before writing the pixel
4738          * mutliplier into the DPLL. */
4739         if (HAS_PCH_CPT(dev_priv)) {
4740                 u32 sel;
4741
4742                 temp = I915_READ(PCH_DPLL_SEL);
4743                 temp |= TRANS_DPLL_ENABLE(pipe);
4744                 sel = TRANS_DPLLB_SEL(pipe);
4745                 if (crtc_state->shared_dpll ==
4746                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4747                         temp |= sel;
4748                 else
4749                         temp &= ~sel;
4750                 I915_WRITE(PCH_DPLL_SEL, temp);
4751         }
4752
4753         /* XXX: pch pll's can be enabled any time before we enable the PCH
4754          * transcoder, and we actually should do this to not upset any PCH
4755          * transcoder that already use the clock when we share it.
4756          *
4757          * Note that enable_shared_dpll tries to do the right thing, but
4758          * get_shared_dpll unconditionally resets the pll - we need that to have
4759          * the right LVDS enable sequence. */
4760         intel_enable_shared_dpll(crtc);
4761
4762         /* set transcoder timing, panel must allow it */
4763         assert_panel_unlocked(dev_priv, pipe);
4764         ironlake_pch_transcoder_set_timings(crtc, pipe);
4765
4766         intel_fdi_normal_train(crtc);
4767
4768         /* For PCH DP, enable TRANS_DP_CTL */
4769         if (HAS_PCH_CPT(dev_priv) &&
4770             intel_crtc_has_dp_encoder(crtc_state)) {
4771                 const struct drm_display_mode *adjusted_mode =
4772                         &crtc_state->base.adjusted_mode;
4773                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4774                 i915_reg_t reg = TRANS_DP_CTL(pipe);
4775                 enum port port;
4776
4777                 temp = I915_READ(reg);
4778                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4779                           TRANS_DP_SYNC_MASK |
4780                           TRANS_DP_BPC_MASK);
4781                 temp |= TRANS_DP_OUTPUT_ENABLE;
4782                 temp |= bpc << 9; /* same format but at 11:9 */
4783
4784                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4785                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4786                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4787                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4788
4789                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4790                 WARN_ON(port < PORT_B || port > PORT_D);
4791                 temp |= TRANS_DP_PORT_SEL(port);
4792
4793                 I915_WRITE(reg, temp);
4794         }
4795
4796         ironlake_enable_pch_transcoder(dev_priv, pipe);
4797 }
4798
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * copy the CPU transcoder timings over, then enable the (single) LPT
 * PCH transcoder. LPT only has PCH transcoder A.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4815
/*
 * Verify the pipe is actually running after a modeset on CPT by
 * checking that the scanline counter (PIPEDSL) makes progress;
 * retry once before reporting the pipe as stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;

        temp = I915_READ(dslreg);
        udelay(500);
        /* Scanline counter should have moved away from the sampled value. */
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
        }
}
4829
4830 /*
4831  * The hardware phase 0.0 refers to the center of the pixel.
4832  * We want to start from the top/left edge which is phase
4833  * -0.5. That matches how the hardware calculates the scaling
4834  * factors (from top-left of the first pixel to bottom-right
4835  * of the last pixel, as opposed to the pixel centers).
4836  *
4837  * For 4:2:0 subsampled chroma planes we obviously have to
4838  * adjust that so that the chroma sample position lands in
4839  * the right spot.
4840  *
4841  * Note that for packed YCbCr 4:2:2 formats there is no way to
4842  * control chroma siting. The hardware simply replicates the
4843  * chroma samples for both of the luma samples, and thus we don't
4844  * actually get the expected MPEG2 chroma siting convention :(
4845  * The same behaviour is observed on pre-SKL platforms as well.
4846  */
4847 u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4848 {
4849         int phase = -0x8000;
4850         u16 trip = 0;
4851
4852         if (chroma_cosited)
4853                 phase += (sub - 1) * 0x8000 / sub;
4854
4855         if (phase < 0)
4856                 phase = 0x10000 + phase;
4857         else
4858                 trip = PS_PHASE_TRIP;
4859
4860         return ((phase >> 2) & PS_PHASE_MASK) | trip;
4861 }
4862
/*
 * Stage a scaler request (or release) for one scaler user — the crtc
 * itself or a plane — into @crtc_state->scaler_state.
 *
 * @crtc_state: crtc state holding the scaler bookkeeping to update
 * @force_detach: unconditionally free the user's scaler
 * @scaler_user: index identifying the user (SKL_CRTC_INDEX or plane index)
 * @scaler_id: in/out scaler id owned by this user (-1 when none)
 * @src_w, @src_h: source width/height
 * @dst_w, @dst_h: destination width/height
 * @plane_scaler_check: true when called on behalf of a plane (enables
 *                      the NV12-specific checks)
 * @pixel_format: plane pixel format, only meaningful with
 *                @plane_scaler_check
 *
 * Returns 0 on success, -EINVAL when the requested scaling cannot be
 * supported. Actual register programming happens later during
 * plane/panel-fitter setup.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  bool plane_scaler_check,
                  uint32_t pixel_format)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int need_scaling;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        need_scaling = src_w != dst_w || src_h != dst_h;

        /* NV12 planes always require a scaler, regardless of size. */
        if (plane_scaler_check)
                if (pixel_format == DRM_FORMAT_NV12)
                        need_scaling = true;

        /* YCbCr 4:2:0 output requires the pipe scaler as well. */
        if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
                need_scaling = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* NV12 sources have their own minimum size requirements. */
        if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (IS_GEN11(dev_priv) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (!IS_GEN11(dev_priv) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
4959
4960 /**
4961  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4962  *
4963  * @state: crtc's scaler state
4964  *
4965  * Return
4966  *     0 - scaler_usage updated successfully
4967  *    error - requested scaling cannot be supported or other error condition
4968  */
4969 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4970 {
4971         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4972
4973         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4974                                  &state->scaler_state.scaler_id,
4975                                  state->pipe_src_w, state->pipe_src_h,
4976                                  adjusted_mode->crtc_hdisplay,
4977                                  adjusted_mode->crtc_vdisplay, false, 0);
4978 }
4979
4980 /**
4981  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4982  * @crtc_state: crtc's scaler state
4983  * @plane_state: atomic plane state to update
4984  *
4985  * Return
4986  *     0 - scaler_usage updated successfully
4987  *    error - requested scaling cannot be supported or other error condition
4988  */
4989 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4990                                    struct intel_plane_state *plane_state)
4991 {
4992
4993         struct intel_plane *intel_plane =
4994                 to_intel_plane(plane_state->base.plane);
4995         struct drm_framebuffer *fb = plane_state->base.fb;
4996         int ret;
4997
4998         bool force_detach = !fb || !plane_state->base.visible;
4999
5000         ret = skl_update_scaler(crtc_state, force_detach,
5001                                 drm_plane_index(&intel_plane->base),
5002                                 &plane_state->scaler_id,
5003                                 drm_rect_width(&plane_state->base.src) >> 16,
5004                                 drm_rect_height(&plane_state->base.src) >> 16,
5005                                 drm_rect_width(&plane_state->base.dst),
5006                                 drm_rect_height(&plane_state->base.dst),
5007                                 fb ? true : false, fb ? fb->format->format : 0);
5008
5009         if (ret || plane_state->scaler_id < 0)
5010                 return ret;
5011
5012         /* check colorkey */
5013         if (plane_state->ckey.flags) {
5014                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5015                               intel_plane->base.base.id,
5016                               intel_plane->base.name);
5017                 return -EINVAL;
5018         }
5019
5020         /* Check src format */
5021         switch (fb->format->format) {
5022         case DRM_FORMAT_RGB565:
5023         case DRM_FORMAT_XBGR8888:
5024         case DRM_FORMAT_XRGB8888:
5025         case DRM_FORMAT_ABGR8888:
5026         case DRM_FORMAT_ARGB8888:
5027         case DRM_FORMAT_XRGB2101010:
5028         case DRM_FORMAT_XBGR2101010:
5029         case DRM_FORMAT_YUYV:
5030         case DRM_FORMAT_YVYU:
5031         case DRM_FORMAT_UYVY:
5032         case DRM_FORMAT_VYUY:
5033         case DRM_FORMAT_NV12:
5034                 break;
5035         default:
5036                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5037                               intel_plane->base.base.id, intel_plane->base.name,
5038                               fb->base.id, fb->format->format);
5039                 return -EINVAL;
5040         }
5041
5042         return 0;
5043 }
5044
5045 static void skylake_scaler_disable(struct intel_crtc *crtc)
5046 {
5047         int i;
5048
5049         for (i = 0; i < crtc->num_scalers; i++)
5050                 skl_detach_scaler(crtc, i);
5051 }
5052
/*
 * Program the SKL+ pipe scaler (panel fitter) using the scaler id
 * assigned during atomic check, including the initial phases so
 * sampling starts at the top/left pixel edge.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;

                /* A scaler must have been assigned during atomic check. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* sub = 1, no chroma cositing (non-subsampled output). */
                uv_rgb_hphase = skl_scaler_calc_phase(1, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
}
5082
/*
 * Program the ILK-style panel fitter (PF_CTL/PF_WIN_*) when the crtc
 * state requires it.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int pipe = crtc->pipe;

        if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
        }
}
5103
/*
 * Enable IPS on HSW/BDW when the crtc state asks for it. On BDW this
 * goes through the pcode mailbox; on HSW it is a direct IPS_CTL write
 * followed by a wait for the enable bit to latch.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5143
/*
 * Disable IPS on HSW/BDW (pcode mailbox on BDW, direct IPS_CTL write
 * on HSW) and wait a vblank afterwards so planes can be disabled
 * safely.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5174
5175 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5176 {
5177         if (intel_crtc->overlay) {
5178                 struct drm_device *dev = intel_crtc->base.dev;
5179
5180                 mutex_lock(&dev->struct_mutex);
5181                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5182                 mutex_unlock(&dev->struct_mutex);
5183         }
5184
5185         /* Let userspace switch the overlay on again. In most cases userspace
5186          * has to recompute where to put it anyway.
5187          */
5188 }
5189
5190 /**
5191  * intel_post_enable_primary - Perform operations after enabling primary plane
5192  * @crtc: the CRTC whose primary plane was just enabled
5193  * @new_crtc_state: the enabling state
5194  *
5195  * Performs potentially sleeping operations that must be done after the primary
5196  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5197  * called due to an explicit primary plane update, or due to an implicit
5198  * re-enable that is caused when a sprite plane is updated to no longer
5199  * completely hide the primary plane.
5200  */
5201 static void
5202 intel_post_enable_primary(struct drm_crtc *crtc,
5203                           const struct intel_crtc_state *new_crtc_state)
5204 {
5205         struct drm_device *dev = crtc->dev;
5206         struct drm_i915_private *dev_priv = to_i915(dev);
5207         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5208         int pipe = intel_crtc->pipe;
5209
5210         /*
5211          * Gen2 reports pipe underruns whenever all planes are disabled.
5212          * So don't enable underrun reporting before at least some planes
5213          * are enabled.
5214          * FIXME: Need to fix the logic to work when we turn off all planes
5215          * but leave the pipe running.
5216          */
5217         if (IS_GEN2(dev_priv))
5218                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5219
5220         /* Underruns don't always raise interrupts, so check manually. */
5221         intel_check_cpu_fifo_underruns(dev_priv);
5222         intel_check_pch_fifo_underruns(dev_priv);
5223 }
5224
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic path run before disabling the primary plane: turn off
 * gen2 underrun reporting, disable IPS, and flush self-refresh so the
 * plane disable actually takes effect.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5256
5257 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5258                                        const struct intel_crtc_state *new_crtc_state)
5259 {
5260         if (!old_crtc_state->ips_enabled)
5261                 return false;
5262
5263         if (needs_modeset(&new_crtc_state->base))
5264                 return true;
5265
5266         return !new_crtc_state->ips_enabled;
5267 }
5268
5269 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5270                                        const struct intel_crtc_state *new_crtc_state)
5271 {
5272         if (!new_crtc_state->ips_enabled)
5273                 return false;
5274
5275         if (needs_modeset(&new_crtc_state->base))
5276                 return true;
5277
5278         /*
5279          * We can't read out IPS on broadwell, assume the worst and
5280          * forcibly enable IPS on the first fastset.
5281          */
5282         if (new_crtc_state->update_pipe &&
5283             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5284                 return true;
5285
5286         return !old_crtc_state->ips_enabled;
5287 }
5288
5289 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5290                           const struct intel_crtc_state *crtc_state)
5291 {
5292         if (!crtc_state->nv12_planes)
5293                 return false;
5294
5295         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5296                 return false;
5297
5298         if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5299             IS_CANNONLAKE(dev_priv))
5300                 return true;
5301
5302         return false;
5303 }
5304
/*
 * Tasks run after the planes of one crtc have been committed:
 * frontbuffer flip notification, post-update watermarks, IPS
 * re-enable, FBC post-update, primary-plane post-enable work, and
 * undoing Display WA 827 when no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        /* New crtc state corresponding to @old_crtc_state in this commit. */
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(old_state, primary);

                intel_fbc_post_update(crtc);

                /* Primary became (or stayed) visible through this commit. */
                if (new_primary_state->visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA 827 */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
                skl_wa_528(dev_priv, crtc->pipe, false);
        }
}
5345
/*
 * Prepare a pipe for an atomic plane/crtc update, before the new state is
 * written to the hardware: IPS disable, FBC pre-update, gen2 FIFO underrun
 * suppression, Display WA 827 enable, cxsr/LP watermark disables that need
 * a vblank wait, and pre-vblank ("intermediate") watermark programming.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* IPS has to be torn down before the planes/pipe are reconfigured. */
	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN2(dev_priv) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827: enable when the new state starts needing it. */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
		skl_wa_528(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5435
5436 static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
5437 {
5438         struct drm_device *dev = crtc->base.dev;
5439         struct intel_plane *plane;
5440         unsigned fb_bits = 0;
5441
5442         intel_crtc_dpms_overlay_disable(crtc);
5443
5444         for_each_intel_plane_on_crtc(dev, crtc, plane) {
5445                 if (plane_mask & BIT(plane->id)) {
5446                         plane->disable_plane(plane, crtc);
5447
5448                         fb_bits |= plane->frontbuffer_bit;
5449                 }
5450         }
5451
5452         intel_frontbuffer_flip(to_i915(dev), fb_bits);
5453 }
5454
5455 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5456                                           struct intel_crtc_state *crtc_state,
5457                                           struct drm_atomic_state *old_state)
5458 {
5459         struct drm_connector_state *conn_state;
5460         struct drm_connector *conn;
5461         int i;
5462
5463         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5464                 struct intel_encoder *encoder =
5465                         to_intel_encoder(conn_state->best_encoder);
5466
5467                 if (conn_state->crtc != crtc)
5468                         continue;
5469
5470                 if (encoder->pre_pll_enable)
5471                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5472         }
5473 }
5474
5475 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5476                                       struct intel_crtc_state *crtc_state,
5477                                       struct drm_atomic_state *old_state)
5478 {
5479         struct drm_connector_state *conn_state;
5480         struct drm_connector *conn;
5481         int i;
5482
5483         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5484                 struct intel_encoder *encoder =
5485                         to_intel_encoder(conn_state->best_encoder);
5486
5487                 if (conn_state->crtc != crtc)
5488                         continue;
5489
5490                 if (encoder->pre_enable)
5491                         encoder->pre_enable(encoder, crtc_state, conn_state);
5492         }
5493 }
5494
5495 static void intel_encoders_enable(struct drm_crtc *crtc,
5496                                   struct intel_crtc_state *crtc_state,
5497                                   struct drm_atomic_state *old_state)
5498 {
5499         struct drm_connector_state *conn_state;
5500         struct drm_connector *conn;
5501         int i;
5502
5503         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5504                 struct intel_encoder *encoder =
5505                         to_intel_encoder(conn_state->best_encoder);
5506
5507                 if (conn_state->crtc != crtc)
5508                         continue;
5509
5510                 encoder->enable(encoder, crtc_state, conn_state);
5511                 intel_opregion_notify_encoder(encoder, true);
5512         }
5513 }
5514
5515 static void intel_encoders_disable(struct drm_crtc *crtc,
5516                                    struct intel_crtc_state *old_crtc_state,
5517                                    struct drm_atomic_state *old_state)
5518 {
5519         struct drm_connector_state *old_conn_state;
5520         struct drm_connector *conn;
5521         int i;
5522
5523         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5524                 struct intel_encoder *encoder =
5525                         to_intel_encoder(old_conn_state->best_encoder);
5526
5527                 if (old_conn_state->crtc != crtc)
5528                         continue;
5529
5530                 intel_opregion_notify_encoder(encoder, false);
5531                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5532         }
5533 }
5534
5535 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5536                                         struct intel_crtc_state *old_crtc_state,
5537                                         struct drm_atomic_state *old_state)
5538 {
5539         struct drm_connector_state *old_conn_state;
5540         struct drm_connector *conn;
5541         int i;
5542
5543         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5544                 struct intel_encoder *encoder =
5545                         to_intel_encoder(old_conn_state->best_encoder);
5546
5547                 if (old_conn_state->crtc != crtc)
5548                         continue;
5549
5550                 if (encoder->post_disable)
5551                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5552         }
5553 }
5554
5555 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5556                                             struct intel_crtc_state *old_crtc_state,
5557                                             struct drm_atomic_state *old_state)
5558 {
5559         struct drm_connector_state *old_conn_state;
5560         struct drm_connector *conn;
5561         int i;
5562
5563         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5564                 struct intel_encoder *encoder =
5565                         to_intel_encoder(old_conn_state->best_encoder);
5566
5567                 if (old_conn_state->crtc != crtc)
5568                         continue;
5569
5570                 if (encoder->post_pll_disable)
5571                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5572         }
5573 }
5574
/*
 * Full pipe enable sequence for ILK/SNB/IVB: pipe timings and source size,
 * shared DPLL prep and FDI m/n for PCH encoders, pipeconf, FDI PLL, panel
 * fitter, gamma LUT, watermarks, pipe enable, optional PCH transcoder
 * enable, then the encoders. FIFO underrun reporting is suppressed across
 * the sequence to hide known-spurious underruns.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	if (intel_crtc->config->has_pch_encoder) {
		/* FDI link m/n values for the CPU transcoder. */
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm underrun reporting now that the pipe is stable. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5668
5669 /* IPS only exists on ULT machines and is tied to pipe A. */
5670 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5671 {
5672         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5673 }
5674
5675 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5676                                             enum pipe pipe, bool apply)
5677 {
5678         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5679         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5680
5681         if (apply)
5682                 val |= mask;
5683         else
5684                 val &= ~mask;
5685
5686         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5687 }
5688
5689 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5690 {
5691         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5692         enum pipe pipe = crtc->pipe;
5693         uint32_t val;
5694
5695         val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5696
5697         /* Program B credit equally to all pipes */
5698         val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5699
5700         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5701 }
5702
/*
 * Full pipe enable sequence for HSW+ DDI platforms: shared DPLL,
 * (gen11+) PLL-to-port mapping, pipe timings/src size, pixel multiplier,
 * FDI m/n, pipeconf/pipemisc, CSC and LUT, panel fitter, DDI transcoder
 * function, watermarks, (gen11+) MBUS credits, pipe enable, LPT PCH and
 * DP MST setup, then the encoders plus workaround vblank waits.
 * DSI transcoders skip the steps their own code path handles.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_map_plls_to_ports(crtc, pipe_config, old_state);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* Not applicable to DSI transcoders. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		/* FDI link m/n values for the CPU transcoder. */
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 intel_crtc->config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (psl_clkgate_wa) {
		/* Let one frame go by with the WA applied before undoing it. */
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5823
5824 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5825 {
5826         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5827         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5828         enum pipe pipe = crtc->pipe;
5829
5830         /* To avoid upsetting the power well on haswell only disable the pfit if
5831          * it's in use. The hw state code will make sure we get this right. */
5832         if (old_crtc_state->pch_pfit.enabled) {
5833                 I915_WRITE(PF_CTL(pipe), 0);
5834                 I915_WRITE(PF_WIN_POS(pipe), 0);
5835                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5836         }
5837 }
5838
/*
 * Full pipe disable sequence for ILK/SNB/IVB: encoders, vblank, pipe,
 * panel fitter, FDI, then the PCH transcoder, CPT TRANS_DP_CTL/DPLL_SEL
 * teardown and FDI PLL. Underrun reporting is suppressed across the
 * sequence and re-armed at the end.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm underrun reporting now that FDI/PCH are fully down. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5897
/*
 * Full pipe disable sequence for HSW+ DDI platforms: encoders, vblank,
 * pipe, DP MST VC payload, DDI transcoder function, scaler/panel fitter,
 * post-disable encoder hooks, and (gen11+) PLL-to-port unmapping.
 * DSI transcoders skip the pipe/transcoder steps handled elsewhere.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5931
5932 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
5933 {
5934         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5935         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5936
5937         if (!crtc_state->gmch_pfit.control)
5938                 return;
5939
5940         /*
5941          * The panel fitter should only be adjusted whilst the pipe is disabled,
5942          * according to register description and PRM.
5943          */
5944         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5945         assert_pipe_disabled(dev_priv, crtc->pipe);
5946
5947         I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
5948         I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
5949
5950         /* Border color in case we don't scale up to the full screen. Black by
5951          * default, change to something else for debugging. */
5952         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5953 }
5954
5955 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5956 {
5957         if (IS_ICELAKE(dev_priv))
5958                 return port >= PORT_C && port <= PORT_F;
5959
5960         return false;
5961 }
5962
5963 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5964 {
5965         if (!intel_port_is_tc(dev_priv, port))
5966                 return PORT_TC_NONE;
5967
5968         return port - PORT_C;
5969 }
5970
/*
 * Map a DDI @port to the power domain covering its lanes. Unknown ports
 * trigger MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5991
5992 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5993                                   struct intel_crtc_state *crtc_state)
5994 {
5995         struct drm_device *dev = crtc->dev;
5996         struct drm_i915_private *dev_priv = to_i915(dev);
5997         struct drm_encoder *encoder;
5998         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5999         enum pipe pipe = intel_crtc->pipe;
6000         u64 mask;
6001         enum transcoder transcoder = crtc_state->cpu_transcoder;
6002
6003         if (!crtc_state->base.active)
6004                 return 0;
6005
6006         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6007         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6008         if (crtc_state->pch_pfit.enabled ||
6009             crtc_state->pch_pfit.force_thru)
6010                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6011
6012         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6013                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6014
6015                 mask |= BIT_ULL(intel_encoder->power_domain);
6016         }
6017
6018         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6019                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6020
6021         if (crtc_state->shared_dpll)
6022                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6023
6024         return mask;
6025 }
6026
6027 static u64
6028 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6029                                struct intel_crtc_state *crtc_state)
6030 {
6031         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6032         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6033         enum intel_display_power_domain domain;
6034         u64 domains, new_domains, old_domains;
6035
6036         old_domains = intel_crtc->enabled_power_domains;
6037         intel_crtc->enabled_power_domains = new_domains =
6038                 get_crtc_power_domains(crtc, crtc_state);
6039
6040         domains = new_domains & ~old_domains;
6041
6042         for_each_power_domain(domain, domains)
6043                 intel_display_power_get(dev_priv, domain);
6044
6045         return old_domains & ~new_domains;
6046 }
6047
/*
 * Drop one power-domain reference for every domain bit set in @domains
 * (presumably the stale mask returned by modeset_get_crtc_power_domains()
 * — verify against callers).
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
6056
/*
 * Full pipe enable sequence for VLV/CHV: pipe timings and source size,
 * CHV pipe-B blender setup, pipeconf, CSC, DPLL prepare+enable, panel
 * fitter, gamma LUT, watermarks, pipe enable, then the encoders.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(&pipe_config->base);

	/* Unconditional call: VLV/CHV always have atomic watermarks. */
	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6115
6116 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6117 {
6118         struct drm_device *dev = crtc->base.dev;
6119         struct drm_i915_private *dev_priv = to_i915(dev);
6120
6121         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6122         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6123 }
6124
/*
 * Full pipe enable sequence for gen2-4: PLL dividers, pipe timings and
 * source size, pipeconf, underrun reporting (gen3+), DPLL, panel fitter,
 * gamma LUT, watermarks, pipe enable, then the encoders.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 underrun reporting stays off here; see intel_pre_plane_update. */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6174
6175 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6176 {
6177         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6178         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6179
6180         if (!old_crtc_state->gmch_pfit.control)
6181                 return;
6182
6183         assert_pipe_disabled(dev_priv, crtc->pipe);
6184
6185         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6186                       I915_READ(PFIT_CONTROL));
6187         I915_WRITE(PFIT_CONTROL, 0);
6188 }
6189
/*
 * Disable sequence for gmch (pre-ILK / VLV / CHV) pipes: encoders off,
 * vblanks off, pipe off, panel fitter off, then the DPLL. Mirrors the
 * enable sequence in reverse.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* The DPLL is not used with DSI (cf. vlv_compute_dpll), leave it be. */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Underrun reporting control is absent on gen2 (skipped on enable too). */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* Platforms with an initial_watermarks hook handle WMs elsewhere. */
	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6238
/*
 * Force a CRTC off outside the normal atomic commit flow and bring all of
 * the software state tracking (crtc/encoder state, power domains, cdclk
 * bookkeeping) back in sync with the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Planes go first; the pipe can't be shut down underneath them. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/*
	 * Build a throwaway atomic state so the crtc_disable hook gets the
	 * crtc/connector states it expects; it is released right after.
	 */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Sync software state: no mode, inactive, nothing attached. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop the power domain references the enabled CRTC was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6308
6309 /*
6310  * turn all crtc's off, but do not adjust state
6311  * This has to be paired with a call to intel_modeset_setup_hw_state.
6312  */
6313 int intel_display_suspend(struct drm_device *dev)
6314 {
6315         struct drm_i915_private *dev_priv = to_i915(dev);
6316         struct drm_atomic_state *state;
6317         int ret;
6318
6319         state = drm_atomic_helper_suspend(dev);
6320         ret = PTR_ERR_OR_ZERO(state);
6321         if (ret)
6322                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6323         else
6324                 dev_priv->modeset_restore_state = state;
6325         return ret;
6326 }
6327
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the base encoder, then release the containing object. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6335
6336 /* Cross check the actual hw state with our own modeset state tracking (and it's
6337  * internal consistency). */
6338 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6339                                          struct drm_connector_state *conn_state)
6340 {
6341         struct intel_connector *connector = to_intel_connector(conn_state->connector);
6342
6343         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6344                       connector->base.base.id,
6345                       connector->base.name);
6346
6347         if (connector->get_hw_state(connector)) {
6348                 struct intel_encoder *encoder = connector->encoder;
6349
6350                 I915_STATE_WARN(!crtc_state,
6351                          "connector enabled without attached crtc\n");
6352
6353                 if (!crtc_state)
6354                         return;
6355
6356                 I915_STATE_WARN(!crtc_state->active,
6357                       "connector is active, but attached crtc isn't\n");
6358
6359                 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6360                         return;
6361
6362                 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6363                         "atomic encoder doesn't match attached encoder\n");
6364
6365                 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6366                         "attached encoder crtc differs from connector crtc\n");
6367         } else {
6368                 I915_STATE_WARN(crtc_state && crtc_state->active,
6369                         "attached crtc is active, but connector isn't\n");
6370                 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
6371                         "best encoder set without crtc!\n");
6372         }
6373 }
6374
6375 int intel_connector_init(struct intel_connector *connector)
6376 {
6377         struct intel_digital_connector_state *conn_state;
6378
6379         /*
6380          * Allocate enough memory to hold intel_digital_connector_state,
6381          * This might be a few bytes too many, but for connectors that don't
6382          * need it we'll free the state and allocate a smaller one on the first
6383          * succesful commit anyway.
6384          */
6385         conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
6386         if (!conn_state)
6387                 return -ENOMEM;
6388
6389         __drm_atomic_helper_connector_reset(&connector->base,
6390                                             &conn_state->base);
6391
6392         return 0;
6393 }
6394
6395 struct intel_connector *intel_connector_alloc(void)
6396 {
6397         struct intel_connector *connector;
6398
6399         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6400         if (!connector)
6401                 return NULL;
6402
6403         if (intel_connector_init(connector) < 0) {
6404                 kfree(connector);
6405                 return NULL;
6406         }
6407
6408         return connector;
6409 }
6410
6411 /*
6412  * Free the bits allocated by intel_connector_alloc.
6413  * This should only be used after intel_connector_alloc has returned
6414  * successfully, and before drm_connector_init returns successfully.
6415  * Otherwise the destroy callbacks for the connector and the state should
6416  * take care of proper cleanup/free
6417  */
void intel_connector_free(struct intel_connector *connector)
{
	/* base.state was allocated by intel_connector_init(). */
	kfree(to_intel_digital_connector_state(connector->base.state));
	kfree(connector);
}
6423
6424 /* Simple connector->get_hw_state implementation for encoders that support only
6425  * one connector and no cloning and hence the encoder state determines the state
6426  * of the connector. */
6427 bool intel_connector_get_hw_state(struct intel_connector *connector)
6428 {
6429         enum pipe pipe = 0;
6430         struct intel_encoder *encoder = connector->encoder;
6431
6432         return encoder->get_hw_state(encoder, &pipe);
6433 }
6434
6435 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6436 {
6437         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6438                 return crtc_state->fdi_lanes;
6439
6440         return 0;
6441 }
6442
/*
 * Validate the FDI lane count requested for @pipe against per-platform
 * limits and against the lanes claimed by sibling pipes on IVB, where
 * pipes B and C share the FDI lanes. Returns 0 or a negative errno.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* B may use more than 2 lanes only while C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C can only use the lanes pipe B leaves over. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6514
6515 #define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * PCH-connected pipe, lowering pipe_bpp step by step if the requested
 * depth doesn't fit the available lanes.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	/*
	 * If the lane config is invalid, trade color depth for bandwidth:
	 * drop 2 bits per component (floor of 6 bpc) and try again.
	 */
	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* bpp was lowered: report RETRY so the caller can recompute
	 * derived state — see callers for the exact handling. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
6560
6561 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6562 {
6563         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6564         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6565
6566         /* IPS only exists on ULT machines and is tied to pipe A. */
6567         if (!hsw_crtc_supports_ips(crtc))
6568                 return false;
6569
6570         if (!i915_modparams.enable_ips)
6571                 return false;
6572
6573         if (crtc_state->pipe_bpp > 24)
6574                 return false;
6575
6576         /*
6577          * We compare against max which means we must take
6578          * the increased cdclk requirement into account when
6579          * calculating the new cdclk.
6580          *
6581          * Should measure whether using a lower cdclk w/o IPS
6582          */
6583         if (IS_BROADWELL(dev_priv) &&
6584             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6585                 return false;
6586
6587         return true;
6588 }
6589
6590 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6591 {
6592         struct drm_i915_private *dev_priv =
6593                 to_i915(crtc_state->base.crtc->dev);
6594         struct intel_atomic_state *intel_state =
6595                 to_intel_atomic_state(crtc_state->base.state);
6596
6597         if (!hsw_crtc_state_ips_capable(crtc_state))
6598                 return false;
6599
6600         if (crtc_state->ips_force_disable)
6601                 return false;
6602
6603         /* IPS should be fine as long as at least one plane is enabled. */
6604         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6605                 return false;
6606
6607         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6608         if (IS_BROADWELL(dev_priv) &&
6609             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6610                 return false;
6611
6612         return true;
6613 }
6614
6615 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6616 {
6617         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6618
6619         /* GDG double wide on either pipe, otherwise pipe A only */
6620         return INTEL_GEN(dev_priv) < 4 &&
6621                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6622 }
6623
6624 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6625 {
6626         uint32_t pixel_rate;
6627
6628         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6629
6630         /*
6631          * We only use IF-ID interlacing. If we ever use
6632          * PF-ID we'll need to adjust the pixel_rate here.
6633          */
6634
6635         if (pipe_config->pch_pfit.enabled) {
6636                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6637                 uint32_t pfit_size = pipe_config->pch_pfit.size;
6638
6639                 pipe_w = pipe_config->pipe_src_w;
6640                 pipe_h = pipe_config->pipe_src_h;
6641
6642                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6643                 pfit_h = pfit_size & 0xFFFF;
6644                 if (pipe_w < pfit_w)
6645                         pipe_w = pfit_w;
6646                 if (pipe_h < pfit_h)
6647                         pipe_h = pfit_h;
6648
6649                 if (WARN_ON(!pfit_w || !pfit_h))
6650                         return pixel_rate;
6651
6652                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6653                                      pfit_w * pfit_h);
6654         }
6655
6656         return pixel_rate;
6657 }
6658
6659 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6660 {
6661         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6662
6663         if (HAS_GMCH_DISPLAY(dev_priv))
6664                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6665                 crtc_state->pixel_rate =
6666                         crtc_state->base.adjusted_mode.crtc_clock;
6667         else
6668                 crtc_state->pixel_rate =
6669                         ilk_pipe_pixel_rate(crtc_state);
6670 }
6671
/*
 * Validate and finish computing the pipe-level parts of @pipe_config:
 * dotclock limits, double wide mode, source size restrictions, pixel
 * rate and (for PCH ports) the FDI configuration. Returns 0, -EINVAL
 * for an unsupportable config, or RETRY from the FDI computation.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6744
6745 static void
6746 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6747 {
6748         while (*num > DATA_LINK_M_N_MASK ||
6749                *den > DATA_LINK_M_N_MASK) {
6750                 *num >>= 1;
6751                 *den >>= 1;
6752         }
6753 }
6754
6755 static void compute_m_n(unsigned int m, unsigned int n,
6756                         uint32_t *ret_m, uint32_t *ret_n,
6757                         bool constant_n)
6758 {
6759         /*
6760          * Several DP dongles in particular seem to be fussy about
6761          * too large link M/N values. Give N value as 0x8000 that
6762          * should be acceptable by specific devices. 0x8000 is the
6763          * specified fixed N value for asynchronous clock mode,
6764          * which the devices expect also in synchronous clock mode.
6765          */
6766         if (constant_n)
6767                 *ret_n = 0x8000;
6768         else
6769                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6770
6771         *ret_m = div_u64((uint64_t) m * *ret_n, n);
6772         intel_reduce_m_n_ratio(ret_m, ret_n);
6773 }
6774
/*
 * Fill @m_n with the data and link M/N dividers for a link carrying
 * @pixel_clock worth of @bits_per_pixel pixels over @nlanes lanes at
 * @link_clock. @constant_n forces the fixed N value (see compute_m_n).
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n)
{
	m_n->tu = 64;

	/* Data M/N: pixel bandwidth (bpp * dotclock) vs. total link
	 * byte rate (link_clock * nlanes * 8). */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	/* Link M/N: stream clock vs. link clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
6792
6793 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6794 {
6795         if (i915_modparams.panel_use_ssc >= 0)
6796                 return i915_modparams.panel_use_ssc != 0;
6797         return dev_priv->vbt.lvds_use_ssc
6798                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6799 }
6800
6801 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6802 {
6803         return (1 << dpll->n) << 16 | dpll->m2;
6804 }
6805
6806 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6807 {
6808         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6809 }
6810
6811 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6812                                      struct intel_crtc_state *crtc_state,
6813                                      struct dpll *reduced_clock)
6814 {
6815         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6816         u32 fp, fp2 = 0;
6817
6818         if (IS_PINEVIEW(dev_priv)) {
6819                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6820                 if (reduced_clock)
6821                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6822         } else {
6823                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6824                 if (reduced_clock)
6825                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6826         }
6827
6828         crtc_state->dpll_hw_state.fp0 = fp;
6829
6830         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6831             reduced_clock) {
6832                 crtc_state->dpll_hw_state.fp1 = fp2;
6833         } else {
6834                 crtc_state->dpll_hw_state.fp1 = fp;
6835         }
6836 }
6837
/*
 * Recalibrate the PLLB opamp via the DPIO sideband. The register values
 * are magic numbers from the VLV PHY programming sequence — do not
 * change without checking against the PHY documentation.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): presumably enables the recalibration mode in the
	 * reference block — confirm against the DPIO register docs. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of PLL DW9 again after calibration. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Restore the reference block to its operating value. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6866
6867 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
6868                                          struct intel_link_m_n *m_n)
6869 {
6870         struct drm_device *dev = crtc->base.dev;
6871         struct drm_i915_private *dev_priv = to_i915(dev);
6872         int pipe = crtc->pipe;
6873
6874         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
6875         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
6876         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
6877         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
6878 }
6879
/*
 * Program link M/N (and optionally the M2/N2 set) for the CPU transcoder.
 * Gen5+ uses per-transcoder registers; older hardware uses the per-pipe
 * g4x registers and has no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6912
6913 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
6914 {
6915         struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
6916
6917         if (m_n == M1_N1) {
6918                 dp_m_n = &crtc->config->dp_m_n;
6919                 dp_m2_n2 = &crtc->config->dp_m2_n2;
6920         } else if (m_n == M2_N2) {
6921
6922                 /*
6923                  * M2_N2 registers are not supported. Hence m2_n2 divider value
6924                  * needs to be programmed into M1_N1.
6925                  */
6926                 dp_m_n = &crtc->config->dp_m2_n2;
6927         } else {
6928                 DRM_ERROR("Unsupported divider value\n");
6929                 return;
6930         }
6931
6932         if (crtc->config->has_pch_encoder)
6933                 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
6934         else
6935                 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
6936 }
6937
6938 static void vlv_compute_dpll(struct intel_crtc *crtc,
6939                              struct intel_crtc_state *pipe_config)
6940 {
6941         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
6942                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6943         if (crtc->pipe != PIPE_A)
6944                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6945
6946         /* DPLL not used with DSI, but still need the rest set up */
6947         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6948                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6949                         DPLL_EXT_BUFFER_ENABLE_VLV;
6950
6951         pipe_config->dpll_hw_state.dpll_md =
6952                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6953 }
6954
6955 static void chv_compute_dpll(struct intel_crtc *crtc,
6956                              struct intel_crtc_state *pipe_config)
6957 {
6958         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
6959                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6960         if (crtc->pipe != PIPE_A)
6961                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6962
6963         /* DPLL not used with DSI, but still need the rest set up */
6964         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6965                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6966
6967         pipe_config->dpll_hw_state.dpll_md =
6968                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6969 }
6970
6971 static void vlv_prepare_pll(struct intel_crtc *crtc,
6972                             const struct intel_crtc_state *pipe_config)
6973 {
6974         struct drm_device *dev = crtc->base.dev;
6975         struct drm_i915_private *dev_priv = to_i915(dev);
6976         enum pipe pipe = crtc->pipe;
6977         u32 mdiv;
6978         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6979         u32 coreclk, reg_val;
6980
6981         /* Enable Refclk */
6982         I915_WRITE(DPLL(pipe),
6983                    pipe_config->dpll_hw_state.dpll &
6984                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6985
6986         /* No need to actually set up the DPLL with DSI */
6987         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6988                 return;
6989
6990         mutex_lock(&dev_priv->sb_lock);
6991
6992         bestn = pipe_config->dpll.n;
6993         bestm1 = pipe_config->dpll.m1;
6994         bestm2 = pipe_config->dpll.m2;
6995         bestp1 = pipe_config->dpll.p1;
6996         bestp2 = pipe_config->dpll.p2;
6997
6998         /* See eDP HDMI DPIO driver vbios notes doc */
6999
7000         /* PLL B needs special handling */
7001         if (pipe == PIPE_B)
7002                 vlv_pllb_recal_opamp(dev_priv, pipe);
7003
7004         /* Set up Tx target for periodic Rcomp update */
7005         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7006
7007         /* Disable target IRef on PLL */
7008         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7009         reg_val &= 0x00ffffff;
7010         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7011
7012         /* Disable fast lock */
7013         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7014
7015         /* Set idtafcrecal before PLL is enabled */
7016         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7017         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7018         mdiv |= ((bestn << DPIO_N_SHIFT));
7019         mdiv |= (1 << DPIO_K_SHIFT);
7020
7021         /*
7022          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7023          * but we don't support that).
7024          * Note: don't use the DAC post divider as it seems unstable.
7025          */
7026         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7027         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7028
7029         mdiv |= DPIO_ENABLE_CALIBRATION;
7030         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7031
7032         /* Set HBR and RBR LPF coefficients */
7033         if (pipe_config->port_clock == 162000 ||
7034             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
7035             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
7036                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7037                                  0x009f0003);
7038         else
7039                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7040                                  0x00d0000f);
7041
7042         if (intel_crtc_has_dp_encoder(pipe_config)) {
7043                 /* Use SSC source */
7044                 if (pipe == PIPE_A)
7045                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7046                                          0x0df40000);
7047                 else
7048                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7049                                          0x0df70000);
7050         } else { /* HDMI or VGA */
7051                 /* Use bend source */
7052                 if (pipe == PIPE_A)
7053                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7054                                          0x0df70000);
7055                 else
7056                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7057                                          0x0df40000);
7058         }
7059
7060         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7061         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7062         if (intel_crtc_has_dp_encoder(crtc->config))
7063                 coreclk |= 0x01000000;
7064         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7065
7066         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7067         mutex_unlock(&dev_priv->sb_lock);
7068 }
7069
/*
 * Program the CHV DPIO PLL dividers, loop filter and lock-detect settings
 * for the pipe from @pipe_config. The VCO itself is enabled later by
 * chv_enable_pll(). All decisions are taken from @pipe_config only.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* dpll.m2 carries 22 fractional bits: split integer/fraction parts */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* sideband accesses must be serialized */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* coarse threshold only when there is no fractional part */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter: coefficients chosen by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7174
7175 /**
7176  * vlv_force_pll_on - forcibly enable just the PLL
7177  * @dev_priv: i915 private structure
7178  * @pipe: pipe PLL to enable
7179  * @dpll: PLL configuration
7180  *
7181  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7182  * in cases where we need the PLL enabled even when @pipe is not going to
7183  * be enabled.
7184  */
7185 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7186                      const struct dpll *dpll)
7187 {
7188         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7189         struct intel_crtc_state *pipe_config;
7190
7191         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7192         if (!pipe_config)
7193                 return -ENOMEM;
7194
7195         pipe_config->base.crtc = &crtc->base;
7196         pipe_config->pixel_multiplier = 1;
7197         pipe_config->dpll = *dpll;
7198
7199         if (IS_CHERRYVIEW(dev_priv)) {
7200                 chv_compute_dpll(crtc, pipe_config);
7201                 chv_prepare_pll(crtc, pipe_config);
7202                 chv_enable_pll(crtc, pipe_config);
7203         } else {
7204                 vlv_compute_dpll(crtc, pipe_config);
7205                 vlv_prepare_pll(crtc, pipe_config);
7206                 vlv_enable_pll(crtc, pipe_config);
7207         }
7208
7209         kfree(pipe_config);
7210
7211         return 0;
7212 }
7213
7214 /**
7215  * vlv_force_pll_off - forcibly disable just the PLL
7216  * @dev_priv: i915 private structure
7217  * @pipe: pipe PLL to disable
7218  *
7219  * Disable the PLL for @pipe. To be used in cases where we need
7220  * the PLL enabled even when @pipe is not going to be enabled.
7221  */
7222 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7223 {
7224         if (IS_CHERRYVIEW(dev_priv))
7225                 chv_disable_pll(dev_priv, pipe);
7226         else
7227                 vlv_disable_pll(dev_priv, pipe);
7228 }
7229
/*
 * Compute the gen3+ (i9xx-style) DPLL control register value(s) for
 * @crtc_state and store them in crtc_state->dpll_hw_state. The divider
 * values come from crtc_state->dpll; @reduced_clock optionally supplies
 * downclocked dividers (only the P1 field is consumed here, on G4X).
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Commit the FP divider state first; see its definition for details. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier lives in the DPLL itself only on these parts */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP uses the same "high speed" clocking mode as SDVO/HDMI */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* downclocked P1 goes into the FPA1 field, G4X only */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* P2 is an encoded selector, not a plain divider value */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carry the pixel multiplier in the separate DPLL_MD register */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7302
7303 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7304                               struct intel_crtc_state *crtc_state,
7305                               struct dpll *reduced_clock)
7306 {
7307         struct drm_device *dev = crtc->base.dev;
7308         struct drm_i915_private *dev_priv = to_i915(dev);
7309         u32 dpll;
7310         struct dpll *clock = &crtc_state->dpll;
7311
7312         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7313
7314         dpll = DPLL_VGA_MODE_DIS;
7315
7316         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7317                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7318         } else {
7319                 if (clock->p1 == 2)
7320                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7321                 else
7322                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7323                 if (clock->p2 == 4)
7324                         dpll |= PLL_P2_DIVIDE_BY_4;
7325         }
7326
7327         if (!IS_I830(dev_priv) &&
7328             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7329                 dpll |= DPLL_DVO_2X_MODE;
7330
7331         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7332             intel_panel_use_ssc(dev_priv))
7333                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7334         else
7335                 dpll |= PLL_REF_INPUT_DREFCLK;
7336
7337         dpll |= DPLL_VCO_ENABLE;
7338         crtc_state->dpll_hw_state.dpll = dpll;
7339 }
7340
/*
 * Program the pipe/transcoder horizontal and vertical timing registers
 * from the adjusted mode in @crtc_state. The adjusted mode itself must
 * stay untouched so the hw state cross-checker still matches.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO gets a different vsync shift than other outputs */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only written on gen4+ (presumably absent earlier) */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Timing registers pack (active/start - 1) | ((total/end - 1) << 16). */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7402
7403 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7404 {
7405         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7406         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7407         enum pipe pipe = crtc->pipe;
7408
7409         /* pipesrc controls the size that is scaled from, which should
7410          * always be the user's requested size.
7411          */
7412         I915_WRITE(PIPESRC(pipe),
7413                    ((crtc_state->pipe_src_w - 1) << 16) |
7414                    (crtc_state->pipe_src_h - 1));
7415 }
7416
7417 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7418                                    struct intel_crtc_state *pipe_config)
7419 {
7420         struct drm_device *dev = crtc->base.dev;
7421         struct drm_i915_private *dev_priv = to_i915(dev);
7422         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7423         uint32_t tmp;
7424
7425         tmp = I915_READ(HTOTAL(cpu_transcoder));
7426         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7427         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7428         tmp = I915_READ(HBLANK(cpu_transcoder));
7429         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7430         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7431         tmp = I915_READ(HSYNC(cpu_transcoder));
7432         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7433         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7434
7435         tmp = I915_READ(VTOTAL(cpu_transcoder));
7436         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7437         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7438         tmp = I915_READ(VBLANK(cpu_transcoder));
7439         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7440         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7441         tmp = I915_READ(VSYNC(cpu_transcoder));
7442         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7443         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7444
7445         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7446                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7447                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7448                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7449         }
7450 }
7451
7452 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7453                                     struct intel_crtc_state *pipe_config)
7454 {
7455         struct drm_device *dev = crtc->base.dev;
7456         struct drm_i915_private *dev_priv = to_i915(dev);
7457         u32 tmp;
7458
7459         tmp = I915_READ(PIPESRC(crtc->pipe));
7460         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7461         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7462
7463         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7464         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7465 }
7466
7467 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7468                                  struct intel_crtc_state *pipe_config)
7469 {
7470         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7471         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7472         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7473         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7474
7475         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7476         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7477         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7478         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7479
7480         mode->flags = pipe_config->base.adjusted_mode.flags;
7481         mode->type = DRM_MODE_TYPE_DRIVER;
7482
7483         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7484
7485         mode->hsync = drm_mode_hsync(mode);
7486         mode->vrefresh = drm_mode_vrefresh(mode);
7487         drm_mode_set_name(mode);
7488 }
7489
/*
 * Compute and write the PIPECONF register for @crtc_state (gen2-4 and
 * VLV/CHV style hardware).
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	/* gen3 and SDVO outputs need the field-indication interlace mode */
	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	/* NOTE(review): presumably selects limited RGB range -- confirm bspec */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
7545
7546 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7547                                    struct intel_crtc_state *crtc_state)
7548 {
7549         struct drm_device *dev = crtc->base.dev;
7550         struct drm_i915_private *dev_priv = to_i915(dev);
7551         const struct intel_limit *limit;
7552         int refclk = 48000;
7553
7554         memset(&crtc_state->dpll_hw_state, 0,
7555                sizeof(crtc_state->dpll_hw_state));
7556
7557         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7558                 if (intel_panel_use_ssc(dev_priv)) {
7559                         refclk = dev_priv->vbt.lvds_ssc_freq;
7560                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7561                 }
7562
7563                 limit = &intel_limits_i8xx_lvds;
7564         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7565                 limit = &intel_limits_i8xx_dvo;
7566         } else {
7567                 limit = &intel_limits_i8xx_dac;
7568         }
7569
7570         if (!crtc_state->clock_set &&
7571             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7572                                  refclk, NULL, &crtc_state->dpll)) {
7573                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7574                 return -EINVAL;
7575         }
7576
7577         i8xx_compute_dpll(crtc, crtc_state, NULL);
7578
7579         return 0;
7580 }
7581
7582 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7583                                   struct intel_crtc_state *crtc_state)
7584 {
7585         struct drm_device *dev = crtc->base.dev;
7586         struct drm_i915_private *dev_priv = to_i915(dev);
7587         const struct intel_limit *limit;
7588         int refclk = 96000;
7589
7590         memset(&crtc_state->dpll_hw_state, 0,
7591                sizeof(crtc_state->dpll_hw_state));
7592
7593         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7594                 if (intel_panel_use_ssc(dev_priv)) {
7595                         refclk = dev_priv->vbt.lvds_ssc_freq;
7596                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7597                 }
7598
7599                 if (intel_is_dual_link_lvds(dev))
7600                         limit = &intel_limits_g4x_dual_channel_lvds;
7601                 else
7602                         limit = &intel_limits_g4x_single_channel_lvds;
7603         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7604                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7605                 limit = &intel_limits_g4x_hdmi;
7606         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7607                 limit = &intel_limits_g4x_sdvo;
7608         } else {
7609                 /* The option is for other outputs */
7610                 limit = &intel_limits_i9xx_sdvo;
7611         }
7612
7613         if (!crtc_state->clock_set &&
7614             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7615                                 refclk, NULL, &crtc_state->dpll)) {
7616                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7617                 return -EINVAL;
7618         }
7619
7620         i9xx_compute_dpll(crtc, crtc_state, NULL);
7621
7622         return 0;
7623 }
7624
7625 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7626                                   struct intel_crtc_state *crtc_state)
7627 {
7628         struct drm_device *dev = crtc->base.dev;
7629         struct drm_i915_private *dev_priv = to_i915(dev);
7630         const struct intel_limit *limit;
7631         int refclk = 96000;
7632
7633         memset(&crtc_state->dpll_hw_state, 0,
7634                sizeof(crtc_state->dpll_hw_state));
7635
7636         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7637                 if (intel_panel_use_ssc(dev_priv)) {
7638                         refclk = dev_priv->vbt.lvds_ssc_freq;
7639                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7640                 }
7641
7642                 limit = &intel_limits_pineview_lvds;
7643         } else {
7644                 limit = &intel_limits_pineview_sdvo;
7645         }
7646
7647         if (!crtc_state->clock_set &&
7648             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7649                                 refclk, NULL, &crtc_state->dpll)) {
7650                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7651                 return -EINVAL;
7652         }
7653
7654         i9xx_compute_dpll(crtc, crtc_state, NULL);
7655
7656         return 0;
7657 }
7658
7659 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7660                                    struct intel_crtc_state *crtc_state)
7661 {
7662         struct drm_device *dev = crtc->base.dev;
7663         struct drm_i915_private *dev_priv = to_i915(dev);
7664         const struct intel_limit *limit;
7665         int refclk = 96000;
7666
7667         memset(&crtc_state->dpll_hw_state, 0,
7668                sizeof(crtc_state->dpll_hw_state));
7669
7670         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7671                 if (intel_panel_use_ssc(dev_priv)) {
7672                         refclk = dev_priv->vbt.lvds_ssc_freq;
7673                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7674                 }
7675
7676                 limit = &intel_limits_i9xx_lvds;
7677         } else {
7678                 limit = &intel_limits_i9xx_sdvo;
7679         }
7680
7681         if (!crtc_state->clock_set &&
7682             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7683                                  refclk, NULL, &crtc_state->dpll)) {
7684                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7685                 return -EINVAL;
7686         }
7687
7688         i9xx_compute_dpll(crtc, crtc_state, NULL);
7689
7690         return 0;
7691 }
7692
7693 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7694                                   struct intel_crtc_state *crtc_state)
7695 {
7696         int refclk = 100000;
7697         const struct intel_limit *limit = &intel_limits_chv;
7698
7699         memset(&crtc_state->dpll_hw_state, 0,
7700                sizeof(crtc_state->dpll_hw_state));
7701
7702         if (!crtc_state->clock_set &&
7703             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7704                                 refclk, NULL, &crtc_state->dpll)) {
7705                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7706                 return -EINVAL;
7707         }
7708
7709         chv_compute_dpll(crtc, crtc_state);
7710
7711         return 0;
7712 }
7713
7714 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7715                                   struct intel_crtc_state *crtc_state)
7716 {
7717         int refclk = 100000;
7718         const struct intel_limit *limit = &intel_limits_vlv;
7719
7720         memset(&crtc_state->dpll_hw_state, 0,
7721                sizeof(crtc_state->dpll_hw_state));
7722
7723         if (!crtc_state->clock_set &&
7724             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7725                                 refclk, NULL, &crtc_state->dpll)) {
7726                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7727                 return -EINVAL;
7728         }
7729
7730         vlv_compute_dpll(crtc, crtc_state);
7731
7732         return 0;
7733 }
7734
/*
 * Read out the GMCH panel fitter state into @pipe_config, but only if
 * the fitter is enabled and attached to this crtc's pipe; otherwise
 * pipe_config->gmch_pfit is left untouched.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t tmp;

        /* Gen2/3 devices that are i830 or non-mobile have no pfit. */
        if (INTEL_GEN(dev_priv) <= 3 &&
            (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                /* Pre-gen4 the pfit can only serve pipe B. */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                /* Gen4+ encodes the attached pipe in PFIT_CONTROL. */
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7761
/*
 * Read the VLV DPLL divider settings back from DPIO and compute the
 * resulting port clock into @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* DPIO accesses are serialized by the sideband lock. */
        mutex_lock(&dev_priv->sb_lock);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->sb_lock);

        /* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7788
/*
 * Reconstruct the framebuffer currently scanned out by the primary
 * plane from the hardware registers and store it in @plane_config, so
 * the driver can take over the already-programmed surface (presumably
 * set up by the firmware -- TODO confirm against callers).  On failure
 * (plane disabled, or allocation failure) plane_config->fb is left
 * unset.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to take over if the plane is not enabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Only gen4+ report X-tiling in the plane control register. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
        }

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /*
         * The surface base/offset registers differ by generation.
         * NOTE(review): 'offset' is read but not used beyond this
         * point in this function.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC stores width-1/height-1, hence the +1. */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
7864
/*
 * Read the CHV DPLL divider settings back from DPIO and compute the
 * resulting port clock into @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* DPIO accesses are serialized by the sideband lock. */
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);

        /*
         * m2 carries a 22-bit fractional part (from PLL_DW2) when
         * fractional divide is enabled; the integer part from PLL_DW0
         * sits above it.
         */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7898
/*
 * Read out the full hardware state of a GMCH pipe (gen2-4, VLV, CHV)
 * into @pipe_config.  Returns true if the pipe is enabled and the
 * state was read out, false otherwise.  The pipe power domain is held
 * for the duration of the register accesses.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        /* On these platforms the transcoder is always tied to the pipe. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only G4x/VLV/CHV encode the pipe bpp in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier location varies by generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev_priv))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Read back the effective port clock from the platform's DPLL. */
        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
8012
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) on
 * IBX/CPT: select the nonspread source (CK505 vs. internal), enable or
 * disable the SSC source, and route the CPU source output for eDP,
 * based on which panel outputs are present.  The final register value
 * is computed up front; if it already matches, nothing is touched,
 * otherwise the register is walked to the final value one source at a
 * time with the required delays in between.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* CK505 (and hence SSC eligibility) only applies to IBX. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep SSC alive for any DPLL still referencing it. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        /* Nothing to do if the hardware already matches. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise updates must have converged on the precomputed value. */
        BUG_ON(val != final);
}
8179
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the IOSFSB reset
 * control bit and wait (up to 100 us) for the status bit to assert,
 * then de-assert and wait for the status bit to clear.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8200
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the sideband interface
 * (SBI_MPHY).  The register offsets and values are magic numbers taken
 * from the WaMPhyProgramming:hsw workaround; each read-modify-write
 * pair at offsets 0x2xxx/0x21xx appears to program two mirrored
 * channels identically -- NOTE(review): channel interpretation assumed
 * from the offset pattern, confirm against BSpec.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8275
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        uint32_t reg, tmp;

        /* Sanitize impossible parameter combinations. */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /* Un-disable the SSC PLL but keep PATHALT set while it settles. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Release PATHALT to switch onto the SSC clock path. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable register differs between LP and non-LP PCH. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
8320
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        uint32_t reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* The buffer-enable register differs between LP and non-LP PCH. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        /*
         * If the SSC PLL is still running, set PATHALT first (with a
         * 32 us settle delay) and only then set the DISABLE bit.
         */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
8346
/*
 * Map a clock-bend amount in "steps" (-50..+50, multiples of 5) to an
 * index into the sscdivintphase[] table below.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE values for each supported bend step. */
static const uint16_t sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
8372
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
        uint32_t tmp;
        int idx = BEND_IDX(steps);

        /* Only multiples of 5 within the table range are valid. */
        if (WARN_ON(steps % 5 != 0))
                return;

        if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
                return;

        mutex_lock(&dev_priv->sb_lock);

        /* Odd half-steps need the dither phase pattern enabled. */
        if (steps % 10 != 0)
                tmp = 0xAAAAAAAB;
        else
                tmp = 0x00000000;
        intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

        /* Program the table value into the low word of SSCDIVINTPHASE. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
        tmp &= 0xffff0000;
        tmp |= sscdivintphase[idx];
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
8407
8408 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8409 {
8410         struct intel_encoder *encoder;
8411         bool has_vga = false;
8412
8413         for_each_intel_encoder(&dev_priv->drm, encoder) {
8414                 switch (encoder->type) {
8415                 case INTEL_OUTPUT_ANALOG:
8416                         has_vga = true;
8417                         break;
8418                 default:
8419                         break;
8420                 }
8421         }
8422
8423         if (has_vga) {
8424                 lpt_bend_clkout_dp(dev_priv, 0);
8425                 lpt_enable_clkout_dp(dev_priv, true, true);
8426         } else {
8427                 lpt_disable_clkout_dp(dev_priv);
8428         }
8429 }
8430
8431 /*
8432  * Initialize reference clocks when the driver loads
8433  */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT and LPT have different refclk trees; other PCHes need nothing. */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8441
8442 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8443 {
8444         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8445         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8446         enum pipe pipe = crtc->pipe;
8447         uint32_t val;
8448
8449         val = 0;
8450
8451         switch (crtc_state->pipe_bpp) {
8452         case 18:
8453                 val |= PIPECONF_6BPC;
8454                 break;
8455         case 24:
8456                 val |= PIPECONF_8BPC;
8457                 break;
8458         case 30:
8459                 val |= PIPECONF_10BPC;
8460                 break;
8461         case 36:
8462                 val |= PIPECONF_12BPC;
8463                 break;
8464         default:
8465                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8466                 BUG();
8467         }
8468
8469         if (crtc_state->dither)
8470                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8471
8472         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8473                 val |= PIPECONF_INTERLACED_ILK;
8474         else
8475                 val |= PIPECONF_PROGRESSIVE;
8476
8477         if (crtc_state->limited_color_range)
8478                 val |= PIPECONF_COLOR_RANGE_SELECT;
8479
8480         I915_WRITE(PIPECONF(pipe), val);
8481         POSTING_READ(PIPECONF(pipe));
8482 }
8483
8484 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8485 {
8486         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8487         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8488         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8489         u32 val = 0;
8490
8491         if (IS_HASWELL(dev_priv) && crtc_state->dither)
8492                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8493
8494         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8495                 val |= PIPECONF_INTERLACED_ILK;
8496         else
8497                 val |= PIPECONF_PROGRESSIVE;
8498
8499         I915_WRITE(PIPECONF(cpu_transcoder), val);
8500         POSTING_READ(PIPECONF(cpu_transcoder));
8501 }
8502
8503 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8504 {
8505         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8506         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8507
8508         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8509                 u32 val = 0;
8510
8511                 switch (crtc_state->pipe_bpp) {
8512                 case 18:
8513                         val |= PIPEMISC_DITHER_6_BPC;
8514                         break;
8515                 case 24:
8516                         val |= PIPEMISC_DITHER_8_BPC;
8517                         break;
8518                 case 30:
8519                         val |= PIPEMISC_DITHER_10_BPC;
8520                         break;
8521                 case 36:
8522                         val |= PIPEMISC_DITHER_12_BPC;
8523                         break;
8524                 default:
8525                         /* Case prevented by pipe_config_set_bpp. */
8526                         BUG();
8527                 }
8528
8529                 if (crtc_state->dither)
8530                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8531
8532                 if (crtc_state->ycbcr420) {
8533                         val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
8534                                 PIPEMISC_YUV420_ENABLE |
8535                                 PIPEMISC_YUV420_MODE_FULL_BLEND;
8536                 }
8537
8538                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8539         }
8540 }
8541
/*
 * Compute the minimum number of FDI lanes needed to carry the given
 * mode at the given link rate and pipe bpp.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_capacity = link_bw * 8;

	/* Round up: a partially used lane still counts as a whole lane. */
	return (bps + lane_capacity - 1) / lane_capacity;
}
8552
8553 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8554 {
8555         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8556 }
8557
/*
 * Compute the ILK DPLL, FP0 and FP1 register values for @crtc_state
 * (and optionally a reduced clock for FP1) and store them in
 * crtc_state->dpll_hw_state. Nothing is written to hardware here.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 gets the reduced clock when one is provided, else mirrors FP0. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels with SSC enabled use the spread spectrum reference. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8659
8660 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8661                                        struct intel_crtc_state *crtc_state)
8662 {
8663         struct drm_device *dev = crtc->base.dev;
8664         struct drm_i915_private *dev_priv = to_i915(dev);
8665         const struct intel_limit *limit;
8666         int refclk = 120000;
8667
8668         memset(&crtc_state->dpll_hw_state, 0,
8669                sizeof(crtc_state->dpll_hw_state));
8670
8671         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8672         if (!crtc_state->has_pch_encoder)
8673                 return 0;
8674
8675         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8676                 if (intel_panel_use_ssc(dev_priv)) {
8677                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8678                                       dev_priv->vbt.lvds_ssc_freq);
8679                         refclk = dev_priv->vbt.lvds_ssc_freq;
8680                 }
8681
8682                 if (intel_is_dual_link_lvds(dev)) {
8683                         if (refclk == 100000)
8684                                 limit = &intel_limits_ironlake_dual_lvds_100m;
8685                         else
8686                                 limit = &intel_limits_ironlake_dual_lvds;
8687                 } else {
8688                         if (refclk == 100000)
8689                                 limit = &intel_limits_ironlake_single_lvds_100m;
8690                         else
8691                                 limit = &intel_limits_ironlake_single_lvds;
8692                 }
8693         } else {
8694                 limit = &intel_limits_ironlake_dac;
8695         }
8696
8697         if (!crtc_state->clock_set &&
8698             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8699                                 refclk, NULL, &crtc_state->dpll)) {
8700                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8701                 return -EINVAL;
8702         }
8703
8704         ironlake_compute_dpll(crtc, crtc_state, NULL);
8705
8706         if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8707                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8708                               pipe_name(crtc->pipe));
8709                 return -EINVAL;
8710         }
8711
8712         return 0;
8713 }
8714
8715 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8716                                          struct intel_link_m_n *m_n)
8717 {
8718         struct drm_device *dev = crtc->base.dev;
8719         struct drm_i915_private *dev_priv = to_i915(dev);
8720         enum pipe pipe = crtc->pipe;
8721
8722         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8723         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8724         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8725                 & ~TU_SIZE_MASK;
8726         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8727         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8728                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8729 }
8730
/*
 * Read back the CPU transcoder link/data M/N values. On gen5+ these live
 * in per-transcoder registers; on older hardware they are per-pipe. When
 * @m2_n2 is non-NULL, the alternate M2/N2 set is also read, but only on
 * gen < 8 and only when DRRS is in use (see comment below).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored minus one in the high bits of DATA_M1. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-ILK: per-pipe G4X-style registers. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8771
8772 void intel_dp_get_m_n(struct intel_crtc *crtc,
8773                       struct intel_crtc_state *pipe_config)
8774 {
8775         if (pipe_config->has_pch_encoder)
8776                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8777         else
8778                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8779                                              &pipe_config->dp_m_n,
8780                                              &pipe_config->dp_m2_n2);
8781 }
8782
/* Read back the FDI link M/N values from the CPU transcoder registers. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8789
8790 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8791                                     struct intel_crtc_state *pipe_config)
8792 {
8793         struct drm_device *dev = crtc->base.dev;
8794         struct drm_i915_private *dev_priv = to_i915(dev);
8795         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8796         uint32_t ps_ctrl = 0;
8797         int id = -1;
8798         int i;
8799
8800         /* find scaler attached to this pipe */
8801         for (i = 0; i < crtc->num_scalers; i++) {
8802                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8803                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8804                         id = i;
8805                         pipe_config->pch_pfit.enabled = true;
8806                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8807                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8808                         break;
8809                 }
8810         }
8811
8812         scaler_state->scaler_id = id;
8813         if (id >= 0) {
8814                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8815         } else {
8816                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
8817         }
8818 }
8819
/*
 * Reconstruct the primary plane's framebuffer parameters (format, tiling,
 * size, stride, surface base) from the current hardware state and store
 * them in @plane_config. Presumably used to inherit the firmware/BIOS
 * framebuffer at driver load — TODO confirm against callers.
 *
 * On failure (unknown tiling) the partially built fb is freed and
 * plane_config->fb is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to do if the plane is not currently enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* ICL widened the format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/CNL+ moved the alpha mode into PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Map the hw tiling field to a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface address is 4K-aligned; low bits are flags/reserved. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units of the tile/linear stride alignment. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8922
8923 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8924                                      struct intel_crtc_state *pipe_config)
8925 {
8926         struct drm_device *dev = crtc->base.dev;
8927         struct drm_i915_private *dev_priv = to_i915(dev);
8928         uint32_t tmp;
8929
8930         tmp = I915_READ(PF_CTL(crtc->pipe));
8931
8932         if (tmp & PF_ENABLE) {
8933                 pipe_config->pch_pfit.enabled = true;
8934                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8935                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8936
8937                 /* We currently do not free assignements of panel fitters on
8938                  * ivb/hsw (since we don't use the higher upscaling modes which
8939                  * differentiates them) so just WARN about this case for now. */
8940                 if (IS_GEN7(dev_priv)) {
8941                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8942                                 PF_PIPE_SEL_IVB(crtc->pipe));
8943                 }
8944         }
8945 }
8946
/*
 * Read back the full pipe configuration on ILK-style hardware into
 * @pipe_config. Returns false when the pipe's power domain is off or the
 * pipe itself is disabled. Takes a power domain reference for the
 * duration of the readback and always releases it before returning.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On ILK the CPU transcoder:pipe mapping is fixed 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* PCH transcoder active implies a PCH encoder is driving this pipe. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT: the DPLL is selected per-transcoder via PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		/* Pixel multiplier is stored minus one in the DPLL register. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9043
/*
 * Sanity-check (via state warnings, not hard failures) that all display
 * consumers of LCPLL — CRTCs, power well, PLLs, panel power, backlight
 * PWMs, utility pin, GTC and IRQs — are disabled before the PLL itself
 * is shut down.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9078
9079 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9080 {
9081         if (IS_HASWELL(dev_priv))
9082                 return I915_READ(D_COMP_HSW);
9083         else
9084                 return I915_READ(D_COMP_BDW);
9085 }
9086
9087 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9088 {
9089         if (IS_HASWELL(dev_priv)) {
9090                 mutex_lock(&dev_priv->pcu_lock);
9091                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9092                                             val))
9093                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9094                 mutex_unlock(&dev_priv->pcu_lock);
9095         } else {
9096                 I915_WRITE(D_COMP_BDW, val);
9097                 POSTING_READ(D_COMP_BDW);
9098         }
9099 }
9100
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move CDCLK onto FCLK first so it survives the PLL going down. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	/* Optionally allow the PLL power well to be taken down for PC8+. */
	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9152
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up (locked, enabled, on LCPLL, power-down denied)? */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation before bringing the PLL back. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move CDCLK back from FCLK to the (now locked) LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* CDCLK may have changed while we were on FCLK; re-read and log it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9208
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* On LPT-LP allow the PCH to drop its clock partition level. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* Stop the DP clock output, then put LCPLL to sleep last. */
	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
9247
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	/* Mirror image of hsw_enable_pc8(): bring LCPLL and refclk back up. */
	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	/* Re-assert the PCH clock partition level on LPT-LP. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9263
9264 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9265                                       struct intel_crtc_state *crtc_state)
9266 {
9267         struct intel_atomic_state *state =
9268                 to_intel_atomic_state(crtc_state->base.state);
9269
9270         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9271                 struct intel_encoder *encoder =
9272                         intel_get_crtc_new_encoder(state, crtc_state);
9273
9274                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9275                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9276                                       pipe_name(crtc->pipe));
9277                         return -EINVAL;
9278                 }
9279         }
9280
9281         return 0;
9282 }
9283
9284 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9285                                    enum port port,
9286                                    struct intel_crtc_state *pipe_config)
9287 {
9288         enum intel_dpll_id id;
9289         u32 temp;
9290
9291         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9292         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9293
9294         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9295                 return;
9296
9297         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9298 }
9299
9300 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9301                                 enum port port,
9302                                 struct intel_crtc_state *pipe_config)
9303 {
9304         enum intel_dpll_id id;
9305         u32 temp;
9306
9307         /* TODO: TBT pll not implemented. */
9308         switch (port) {
9309         case PORT_A:
9310         case PORT_B:
9311                 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9312                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9313                 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9314
9315                 if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
9316                         return;
9317                 break;
9318         case PORT_C:
9319                 id = DPLL_ID_ICL_MGPLL1;
9320                 break;
9321         case PORT_D:
9322                 id = DPLL_ID_ICL_MGPLL2;
9323                 break;
9324         case PORT_E:
9325                 id = DPLL_ID_ICL_MGPLL3;
9326                 break;
9327         case PORT_F:
9328                 id = DPLL_ID_ICL_MGPLL4;
9329                 break;
9330         default:
9331                 MISSING_CASE(port);
9332                 return;
9333         }
9334
9335         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9336 }
9337
9338 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9339                                 enum port port,
9340                                 struct intel_crtc_state *pipe_config)
9341 {
9342         enum intel_dpll_id id;
9343
9344         switch (port) {
9345         case PORT_A:
9346                 id = DPLL_ID_SKL_DPLL0;
9347                 break;
9348         case PORT_B:
9349                 id = DPLL_ID_SKL_DPLL1;
9350                 break;
9351         case PORT_C:
9352                 id = DPLL_ID_SKL_DPLL2;
9353                 break;
9354         default:
9355                 DRM_ERROR("Incorrect port type\n");
9356                 return;
9357         }
9358
9359         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9360 }
9361
9362 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9363                                 enum port port,
9364                                 struct intel_crtc_state *pipe_config)
9365 {
9366         enum intel_dpll_id id;
9367         u32 temp;
9368
9369         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9370         id = temp >> (port * 3 + 1);
9371
9372         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9373                 return;
9374
9375         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9376 }
9377
9378 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9379                                 enum port port,
9380                                 struct intel_crtc_state *pipe_config)
9381 {
9382         enum intel_dpll_id id;
9383         uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9384
9385         switch (ddi_pll_sel) {
9386         case PORT_CLK_SEL_WRPLL1:
9387                 id = DPLL_ID_WRPLL1;
9388                 break;
9389         case PORT_CLK_SEL_WRPLL2:
9390                 id = DPLL_ID_WRPLL2;
9391                 break;
9392         case PORT_CLK_SEL_SPLL:
9393                 id = DPLL_ID_SPLL;
9394                 break;
9395         case PORT_CLK_SEL_LCPLL_810:
9396                 id = DPLL_ID_LCPLL_810;
9397                 break;
9398         case PORT_CLK_SEL_LCPLL_1350:
9399                 id = DPLL_ID_LCPLL_1350;
9400                 break;
9401         case PORT_CLK_SEL_LCPLL_2700:
9402                 id = DPLL_ID_LCPLL_2700;
9403                 break;
9404         default:
9405                 MISSING_CASE(ddi_pll_sel);
9406                 /* fall through */
9407         case PORT_CLK_SEL_NONE:
9408                 return;
9409         }
9410
9411         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9412 }
9413
/*
 * Figure out which CPU transcoder drives @crtc and whether it is enabled.
 * Records the transcoder power-domain reference it takes in
 * @power_domain_mask; the caller is responsible for dropping it.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		/* Decode which pipe the eDP transcoder is currently feeding. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	/* Can't read PIPECONF if the transcoder's power domain is off. */
	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9465
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders; if so,
 * set pipe_config->cpu_transcoder accordingly. Power-domain references
 * taken while probing are recorded in @power_domain_mask for the caller
 * to release. Returns true when a DSI transcoder drives this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	/* BXT has two DSI ports, A and C, each with its own transcoder. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports whose transcoder feeds a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9513
/*
 * Read out the DDI port state for @crtc: which shared DPLL drives the
 * port (per platform), its hw state, and the FDI/PCH configuration.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	/* Which DDI port is this transcoder connected to? */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* An enabled PLL should always have a readable hw state. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9559
/*
 * Read the current hardware state of @crtc into @pipe_config.
 * Returns true if the pipe is active. Every power-domain reference
 * taken during readout is tracked in power_domain_mask and dropped
 * before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* The pipe may instead be driven by a DSI transcoder on BXT/GLK. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* Read out the YCbCr 4:2:0 output state where the hw supports it. */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			/* All three 4:2:0 enable bits are expected to agree. */
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	/* The panel fitter lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* PIPE_MULT exists only for the non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9651
9652 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9653 {
9654         struct drm_i915_private *dev_priv =
9655                 to_i915(plane_state->base.plane->dev);
9656         const struct drm_framebuffer *fb = plane_state->base.fb;
9657         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9658         u32 base;
9659
9660         if (INTEL_INFO(dev_priv)->cursor_needs_physical)
9661                 base = obj->phys_handle->busaddr;
9662         else
9663                 base = intel_plane_ggtt_offset(plane_state);
9664
9665         base += plane_state->color_plane[0].offset;
9666
9667         /* ILK+ do this automagically */
9668         if (HAS_GMCH_DISPLAY(dev_priv) &&
9669             plane_state->base.rotation & DRM_MODE_ROTATE_180)
9670                 base += (plane_state->base.crtc_h *
9671                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9672
9673         return base;
9674 }
9675
9676 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9677 {
9678         int x = plane_state->base.crtc_x;
9679         int y = plane_state->base.crtc_y;
9680         u32 pos = 0;
9681
9682         if (x < 0) {
9683                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9684                 x = -x;
9685         }
9686         pos |= x << CURSOR_X_SHIFT;
9687
9688         if (y < 0) {
9689                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9690                 y = -y;
9691         }
9692         pos |= y << CURSOR_Y_SHIFT;
9693
9694         return pos;
9695 }
9696
9697 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9698 {
9699         const struct drm_mode_config *config =
9700                 &plane_state->base.plane->dev->mode_config;
9701         int width = plane_state->base.crtc_w;
9702         int height = plane_state->base.crtc_h;
9703
9704         return width > 0 && width <= config->cursor_width &&
9705                 height > 0 && height <= config->cursor_height;
9706 }
9707
9708 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
9709 {
9710         const struct drm_framebuffer *fb = plane_state->base.fb;
9711         unsigned int rotation = plane_state->base.rotation;
9712         int src_x, src_y;
9713         u32 offset;
9714         int ret;
9715
9716         intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
9717         plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
9718
9719         ret = intel_plane_check_stride(plane_state);
9720         if (ret)
9721                 return ret;
9722
9723         src_x = plane_state->base.src_x >> 16;
9724         src_y = plane_state->base.src_y >> 16;
9725
9726         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
9727         offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
9728                                                     plane_state, 0);
9729
9730         if (src_x != 0 || src_y != 0) {
9731                 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9732                 return -EINVAL;
9733         }
9734
9735         plane_state->color_plane[0].offset = offset;
9736
9737         return 0;
9738 }
9739
9740 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9741                               struct intel_plane_state *plane_state)
9742 {
9743         const struct drm_framebuffer *fb = plane_state->base.fb;
9744         int ret;
9745
9746         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9747                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9748                 return -EINVAL;
9749         }
9750
9751         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
9752                                                   &crtc_state->base,
9753                                                   DRM_PLANE_HELPER_NO_SCALING,
9754                                                   DRM_PLANE_HELPER_NO_SCALING,
9755                                                   true, true);
9756         if (ret)
9757                 return ret;
9758
9759         if (!plane_state->base.visible)
9760                 return 0;
9761
9762         ret = intel_plane_check_src_coordinates(plane_state);
9763         if (ret)
9764                 return ret;
9765
9766         ret = intel_cursor_check_surface(plane_state);
9767         if (ret)
9768                 return ret;
9769
9770         return 0;
9771 }
9772
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	/*
	 * 2 KiB stride limit; matches the largest stride accepted by
	 * i845_check_cursor().
	 */
	return 2048;
}
9780
9781 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9782                            const struct intel_plane_state *plane_state)
9783 {
9784         return CURSOR_ENABLE |
9785                 CURSOR_GAMMA_ENABLE |
9786                 CURSOR_FORMAT_ARGB |
9787                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
9788 }
9789
9790 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9791 {
9792         int width = plane_state->base.crtc_w;
9793
9794         /*
9795          * 845g/865g are only limited by the width of their cursors,
9796          * the height is arbitrary up to the precision of the register.
9797          */
9798         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9799 }
9800
9801 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
9802                              struct intel_plane_state *plane_state)
9803 {
9804         const struct drm_framebuffer *fb = plane_state->base.fb;
9805         int ret;
9806
9807         ret = intel_check_cursor(crtc_state, plane_state);
9808         if (ret)
9809                 return ret;
9810
9811         /* if we want to turn off the cursor ignore width and height */
9812         if (!fb)
9813                 return 0;
9814
9815         /* Check for which cursor types we support */
9816         if (!i845_cursor_size_ok(plane_state)) {
9817                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
9818                           plane_state->base.crtc_w,
9819                           plane_state->base.crtc_h);
9820                 return -EINVAL;
9821         }
9822
9823         WARN_ON(plane_state->base.visible &&
9824                 plane_state->color_plane[0].stride != fb->pitches[0]);
9825
9826         switch (fb->pitches[0]) {
9827         case 256:
9828         case 512:
9829         case 1024:
9830         case 2048:
9831                 break;
9832         default:
9833                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
9834                               fb->pitches[0]);
9835                 return -EINVAL;
9836         }
9837
9838         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
9839
9840         return 0;
9841 }
9842
/*
 * Program the i845 cursor registers. Passing a NULL/invisible
 * @plane_state disables the cursor (all-zero register values).
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache the programmed values for the fast path above. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Fast path: only the position changed. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9887
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	/* A NULL state means "turn the cursor off" in i845_update_cursor(). */
	i845_update_cursor(plane, NULL, NULL);
}
9893
9894 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9895                                      enum pipe *pipe)
9896 {
9897         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9898         enum intel_display_power_domain power_domain;
9899         bool ret;
9900
9901         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9902         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9903                 return false;
9904
9905         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9906
9907         *pipe = PIPE_A;
9908
9909         intel_display_power_put(dev_priv, power_domain);
9910
9911         return ret;
9912 }
9913
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	/* 4 bytes per pixel at the widest cursor the device supports. */
	return plane->base.dev->mode_config.cursor_width * 4;
}
9921
9922 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9923                            const struct intel_plane_state *plane_state)
9924 {
9925         struct drm_i915_private *dev_priv =
9926                 to_i915(plane_state->base.plane->dev);
9927         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9928         u32 cntl = 0;
9929
9930         if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
9931                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
9932
9933         if (INTEL_GEN(dev_priv) <= 10) {
9934                 cntl |= MCURSOR_GAMMA_ENABLE;
9935
9936                 if (HAS_DDI(dev_priv))
9937                         cntl |= MCURSOR_PIPE_CSC_ENABLE;
9938         }
9939
9940         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9941                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
9942
9943         switch (plane_state->base.crtc_w) {
9944         case 64:
9945                 cntl |= MCURSOR_MODE_64_ARGB_AX;
9946                 break;
9947         case 128:
9948                 cntl |= MCURSOR_MODE_128_ARGB_AX;
9949                 break;
9950         case 256:
9951                 cntl |= MCURSOR_MODE_256_ARGB_AX;
9952                 break;
9953         default:
9954                 MISSING_CASE(plane_state->base.crtc_w);
9955                 return 0;
9956         }
9957
9958         if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
9959                 cntl |= MCURSOR_ROTATE_180;
9960
9961         return cntl;
9962 }
9963
9964 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
9965 {
9966         struct drm_i915_private *dev_priv =
9967                 to_i915(plane_state->base.plane->dev);
9968         int width = plane_state->base.crtc_w;
9969         int height = plane_state->base.crtc_h;
9970
9971         if (!intel_cursor_size_ok(plane_state))
9972                 return false;
9973
9974         /* Cursor width is limited to a few power-of-two sizes */
9975         switch (width) {
9976         case 256:
9977         case 128:
9978         case 64:
9979                 break;
9980         default:
9981                 return false;
9982         }
9983
9984         /*
9985          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9986          * height from 8 lines up to the cursor width, when the
9987          * cursor is not rotated. Everything else requires square
9988          * cursors.
9989          */
9990         if (HAS_CUR_FBC(dev_priv) &&
9991             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
9992                 if (height < 8 || height > width)
9993                         return false;
9994         } else {
9995                 if (height != width)
9996                         return false;
9997         }
9998
9999         return true;
10000 }
10001
/*
 * Validate a proposed cursor plane state for i9xx-style cursor hardware.
 *
 * Runs the generic cursor checks, then enforces the platform-specific
 * constraints (supported sizes, packed stride, CHV pipe C erratum) and
 * finally precomputes the CURCNTR register value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i9xx_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          plane_state->base.crtc_w,
                          plane_state->base.crtc_h);
                return -EINVAL;
        }

        WARN_ON(plane_state->base.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* The cursor fb must be tightly packed: stride == width * cpp. */
        if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
                DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
                              fb->pitches[0], plane_state->base.crtc_w);
                return -EINVAL;
        }

        /*
         * There's something wrong with the cursor on CHV pipe C.
         * If it straddles the left edge of the screen then
         * moving it away from the edge or disabling it often
         * results in a pipe underrun, and often that can lead to
         * dead pipe (constant underrun reported, and it scans
         * out just a solid color). To recover from that, the
         * display power well must be turned off and on again.
         * Refuse the put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
            plane_state->base.visible && plane_state->base.crtc_x < 0) {
                DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }

        plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

        return 0;
}
10056
/*
 * Program (or disable, when @plane_state is NULL) the i9xx cursor
 * registers for @plane. All writes use the _FW accessors under the
 * uncore lock so the full update is armed atomically by the final
 * CURBASE write (see the ordering comment below).
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                cntl = plane_state->ctl;

                /* CUR_FBC_CTL is only needed for non-square cursors. */
                if (plane_state->base.crtc_h != plane_state->base.crtc_w)
                        fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always start the full update
         * with a CURCNTR write.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additonally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * CURCNTR and CUR_FBC_CTL are always
         * armed by the CURBASE write only.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                I915_WRITE_FW(CURCNTR(pipe), cntl);
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);

                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                /* Nothing but the position changed: fast path. */
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
        }

        POSTING_READ_FW(CURBASE(pipe));

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10117
/* Turn the cursor off on @plane by committing an all-zero update. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                struct intel_crtc *crtc)
{
        i9xx_update_cursor(plane, NULL, NULL);
}
10123
/*
 * Read back the cursor hardware state for @plane.
 *
 * Returns true if the cursor is enabled and stores the pipe it is
 * attached to in @pipe. A power domain reference is held for the
 * duration of the register read; returns false if the domain is off.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        val = I915_READ(CURCNTR(plane->pipe));

        /* Any non-zero cursor mode means the cursor is enabled. */
        ret = val & MCURSOR_MODE;

        /* Pre-g4x cursors report their pipe via CURCNTR itself. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
10155
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10161
10162 struct drm_framebuffer *
10163 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10164                          struct drm_mode_fb_cmd2 *mode_cmd)
10165 {
10166         struct intel_framebuffer *intel_fb;
10167         int ret;
10168
10169         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10170         if (!intel_fb)
10171                 return ERR_PTR(-ENOMEM);
10172
10173         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10174         if (ret)
10175                 goto err;
10176
10177         return &intel_fb->base;
10178
10179 err:
10180         kfree(intel_fb);
10181         return ERR_PTR(ret);
10182 }
10183
10184 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10185                                         struct drm_crtc *crtc)
10186 {
10187         struct drm_plane *plane;
10188         struct drm_plane_state *plane_state;
10189         int ret, i;
10190
10191         ret = drm_atomic_add_affected_planes(state, crtc);
10192         if (ret)
10193                 return ret;
10194
10195         for_each_new_plane_in_state(state, plane, plane_state, i) {
10196                 if (plane_state->crtc != crtc)
10197                         continue;
10198
10199                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10200                 if (ret)
10201                         return ret;
10202
10203                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10204         }
10205
10206         return 0;
10207 }
10208
/*
 * Acquire a crtc for load-based connector detection and program @mode
 * (or the VESA 640x480@72 fallback) on it.
 *
 * On success the state needed to undo everything is stashed in
 * @old->restore_state for intel_release_load_detect_pipe(), and we wait
 * one vblank so the connector sees a full frame before being probed.
 *
 * NOTE(review): declared int but effectively returns a bool, except
 * that -EDEADLK is passed through verbatim so the caller can do
 * modeset lock backoff.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->base.active = crtc_state->base.enable = true;

        if (!mode)
                mode = &load_detect_mode;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;

        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Duplicate the current state so we can restore it afterwards. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK must reach the caller so it can back off the locks. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
10366
10367 void intel_release_load_detect_pipe(struct drm_connector *connector,
10368                                     struct intel_load_detect_pipe *old,
10369                                     struct drm_modeset_acquire_ctx *ctx)
10370 {
10371         struct intel_encoder *intel_encoder =
10372                 intel_attached_encoder(connector);
10373         struct drm_encoder *encoder = &intel_encoder->base;
10374         struct drm_atomic_state *state = old->restore_state;
10375         int ret;
10376
10377         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10378                       connector->base.id, connector->name,
10379                       encoder->base.id, encoder->name);
10380
10381         if (!state)
10382                 return;
10383
10384         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10385         if (ret)
10386                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10387         drm_atomic_state_put(state);
10388 }
10389
10390 static int i9xx_pll_refclk(struct drm_device *dev,
10391                            const struct intel_crtc_state *pipe_config)
10392 {
10393         struct drm_i915_private *dev_priv = to_i915(dev);
10394         u32 dpll = pipe_config->dpll_hw_state.dpll;
10395
10396         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10397                 return dev_priv->vbt.lvds_ssc_freq;
10398         else if (HAS_PCH_SPLIT(dev_priv))
10399                 return 120000;
10400         else if (!IS_GEN2(dev_priv))
10401                 return 96000;
10402         else
10403                 return 48000;
10404 }
10405
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * by decoding the DPLL and FP register values back into M/N/P dividers
 * and recomputing the port clock from them.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick whichever FP divider set the DPLL is actually using. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Pineview encodes N and M2 differently from the other i9xx. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN2(dev_priv)) {
                /* P1 is stored as a one-hot bitfield; ffs() recovers it. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the output mode the DPLL was set up for. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* Gen2: LVDS presence changes the divider encoding. */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
10495
10496 int intel_dotclock_calculate(int link_freq,
10497                              const struct intel_link_m_n *m_n)
10498 {
10499         /*
10500          * The calculation for the data clock is:
10501          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10502          * But we want to avoid losing precison if possible, so:
10503          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10504          *
10505          * and the link clock is simpler:
10506          * link_clock = (m * link_clock) / n
10507          */
10508
10509         if (!m_n->link_n)
10510                 return 0;
10511
10512         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10513 }
10514
/*
 * Fill in clock state for a PCH-connected pipe: port_clock comes from
 * the DPLL readout, and a dotclock is derived from the FDI M/N values
 * so we have a sensible number even with no active ports.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
10532
10533 /* Returns the currently programmed mode of the given encoder. */
10534 struct drm_display_mode *
10535 intel_encoder_current_mode(struct intel_encoder *encoder)
10536 {
10537         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10538         struct intel_crtc_state *crtc_state;
10539         struct drm_display_mode *mode;
10540         struct intel_crtc *crtc;
10541         enum pipe pipe;
10542
10543         if (!encoder->get_hw_state(encoder, &pipe))
10544                 return NULL;
10545
10546         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10547
10548         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10549         if (!mode)
10550                 return NULL;
10551
10552         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10553         if (!crtc_state) {
10554                 kfree(mode);
10555                 return NULL;
10556         }
10557
10558         crtc_state->base.crtc = &crtc->base;
10559
10560         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10561                 kfree(crtc_state);
10562                 kfree(mode);
10563                 return NULL;
10564         }
10565
10566         encoder->get_config(encoder, crtc_state);
10567
10568         intel_mode_from_pipe_config(mode, crtc_state);
10569
10570         kfree(crtc_state);
10571
10572         return mode;
10573 }
10574
/* drm_crtc_funcs .destroy hook: tear down the crtc and free its wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(intel_crtc);
}
10582
10583 /**
10584  * intel_wm_need_update - Check whether watermarks need updating
10585  * @plane: drm plane
10586  * @state: new plane state
10587  *
10588  * Check current plane state versus the new one to determine whether
10589  * watermarks need to be recalculated.
10590  *
10591  * Returns true or false.
10592  */
10593 static bool intel_wm_need_update(struct drm_plane *plane,
10594                                  struct drm_plane_state *state)
10595 {
10596         struct intel_plane_state *new = to_intel_plane_state(state);
10597         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10598
10599         /* Update watermarks on tiling or size changes. */
10600         if (new->base.visible != cur->base.visible)
10601                 return true;
10602
10603         if (!cur->base.fb || !new->base.fb)
10604                 return false;
10605
10606         if (cur->base.fb->modifier != new->base.fb->modifier ||
10607             cur->base.rotation != new->base.rotation ||
10608             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10609             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10610             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10611             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10612                 return true;
10613
10614         return false;
10615 }
10616
10617 static bool needs_scaling(const struct intel_plane_state *state)
10618 {
10619         int src_w = drm_rect_width(&state->base.src) >> 16;
10620         int src_h = drm_rect_height(&state->base.src) >> 16;
10621         int dst_w = drm_rect_width(&state->base.dst);
10622         int dst_h = drm_rect_height(&state->base.dst);
10623
10624         return (src_w != dst_w || src_h != dst_h);
10625 }
10626
/*
 * Compute how a plane state change affects the crtc state: visibility
 * transitions, watermark update flags, cxsr/lp_wm disabling and the
 * frontbuffer tracking bits. Also runs the skl scaler check on gen9+.
 *
 * Returns 0 on success or a negative error code from the scaler check.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct drm_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct drm_plane_state *plane_state)
{
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->plane);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->base.active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;

        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->base.visible;
        visible = plane_state->visible;

        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->visible = visible = false;
                to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
        }

        if (!was_visible && !visible)
                return 0;

        if (fb != old_plane_state->base.fb)
                pipe_config->fb_changed = true;

        /* Detect the plane being switched on or off by this update. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
                         intel_crtc->base.base.id, intel_crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         fb ? fb->base.id : -1);

        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(&plane->base, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
                        pipe_config->update_wm_post = true;
                }
        }

        if (visible || was_visible)
                pipe_config->fb_bits |= plane->frontbuffer_bit;

        /*
         * WaCxSRDisabledForSpriteScaling:ivb
         *
         * cstate->update_wm was already set above, so this flag will
         * take effect when we commit and program watermarks.
         */
        if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
            needs_scaling(to_intel_plane_state(plane_state)) &&
            !needs_scaling(old_plane_state))
                pipe_config->disable_lp_wm = true;

        return 0;
}
10731
10732 static bool encoders_cloneable(const struct intel_encoder *a,
10733                                const struct intel_encoder *b)
10734 {
10735         /* masks could be asymmetric, so check both ways */
10736         return a == b || (a->cloneable & (1 << b->type) &&
10737                           b->cloneable & (1 << a->type));
10738 }
10739
/*
 * Check whether @encoder is mutually cloneable with every other encoder
 * that the new atomic @state routes to @crtc.
 *
 * Returns true if @encoder may share the crtc with all of them.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		/* Only connectors assigned to this crtc matter. */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
10761
/*
 * Atomic .atomic_check hook for i915 crtcs: validates the new crtc state
 * and computes the derived hw state (clocks, color management, watermarks,
 * scalers, IPS) before the commit phase.
 *
 * Returns 0 on success or a negative error code if the configuration
 * cannot be supported.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pipe is being disabled; watermarks need a post-update pass. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute new dpll state for a full modeset on an enabled pipe;
	 * a leftover shared_dpll here would indicate stale state. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wm needs the optimal wm computed above. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10846
/* CRTC helper vtable: only the atomic state validation hook is used. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
10850
/*
 * Resync every connector's atomic state (crtc and best_encoder pointers)
 * with the legacy connector->encoder->crtc links, adjusting the connector
 * reference that is held while a connector is bound to a crtc.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10875
10876 static void
10877 connected_sink_compute_bpp(struct intel_connector *connector,
10878                            struct intel_crtc_state *pipe_config)
10879 {
10880         const struct drm_display_info *info = &connector->base.display_info;
10881         int bpp = pipe_config->pipe_bpp;
10882
10883         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10884                       connector->base.base.id,
10885                       connector->base.name);
10886
10887         /* Don't use an invalid EDID bpc value */
10888         if (info->bpc != 0 && info->bpc * 3 < bpp) {
10889                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10890                               bpp, info->bpc * 3);
10891                 pipe_config->pipe_bpp = info->bpc * 3;
10892         }
10893
10894         /* Clamp bpp to 8 on screens without EDID 1.4 */
10895         if (info->bpc == 0 && bpp > 24) {
10896                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10897                               bpp);
10898                 pipe_config->pipe_bpp = 24;
10899         }
10900 }
10901
10902 static int
10903 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10904                           struct intel_crtc_state *pipe_config)
10905 {
10906         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10907         struct drm_atomic_state *state;
10908         struct drm_connector *connector;
10909         struct drm_connector_state *connector_state;
10910         int bpp, i;
10911
10912         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10913             IS_CHERRYVIEW(dev_priv)))
10914                 bpp = 10*3;
10915         else if (INTEL_GEN(dev_priv) >= 5)
10916                 bpp = 12*3;
10917         else
10918                 bpp = 8*3;
10919
10920
10921         pipe_config->pipe_bpp = bpp;
10922
10923         state = pipe_config->base.state;
10924
10925         /* Clamp display bpp to EDID value */
10926         for_each_new_connector_in_state(state, connector, connector_state, i) {
10927                 if (connector_state->crtc != &crtc->base)
10928                         continue;
10929
10930                 connected_sink_compute_bpp(to_intel_connector(connector),
10931                                            pipe_config);
10932         }
10933
10934         return bpp;
10935 }
10936
10937 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
10938 {
10939         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
10940                         "type: 0x%x flags: 0x%x\n",
10941                 mode->crtc_clock,
10942                 mode->crtc_hdisplay, mode->crtc_hsync_start,
10943                 mode->crtc_hsync_end, mode->crtc_htotal,
10944                 mode->crtc_vdisplay, mode->crtc_vsync_start,
10945                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
10946 }
10947
10948 static inline void
10949 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10950                       unsigned int lane_count, struct intel_link_m_n *m_n)
10951 {
10952         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10953                       id, lane_count,
10954                       m_n->gmch_m, m_n->gmch_n,
10955                       m_n->link_m, m_n->link_n, m_n->tu);
10956 }
10957
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for the INTEL_OUTPUT_* enum values, indexed by type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10976
10977 static void snprintf_output_types(char *buf, size_t len,
10978                                   unsigned int output_types)
10979 {
10980         char *str = buf;
10981         int i;
10982
10983         str[0] = '\0';
10984
10985         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10986                 int r;
10987
10988                 if ((output_types & BIT(i)) == 0)
10989                         continue;
10990
10991                 r = snprintf(str, len, "%s%s",
10992                              str != buf ? "," : "", output_type_str[i]);
10993                 if (r >= len)
10994                         break;
10995                 str += r;
10996                 len -= r;
10997
10998                 output_types &= ~BIT(i);
10999         }
11000
11001         WARN_ON_ONCE(output_types != 0);
11002 }
11003
/*
 * Dump the full contents of @pipe_config, plus the state of every plane
 * on @crtc, to the KMS debug log. @context is a short string describing
 * why the dump was triggered.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* Second m/n set only exists when DRRS is in use. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		/* Skip planes that belong to other pipes. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11110
/*
 * Verify that no digital port is claimed by more than one encoder in the
 * new atomic @state, and that SST/HDMI and MST are not mixed on the same
 * physical port.
 *
 * Returns true when the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Fall back to the current state for connectors not in @state. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11174
/*
 * Wipe the i915-specific portion of @crtc_state back to zero while
 * keeping the embedded drm_crtc_state and a handful of fields that must
 * survive recomputation.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that survive the wipe below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	/* Only these platforms keep their wm state across the clear. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11214
/*
 * Compute the new pipe configuration for @crtc: clear stale state, let
 * every encoder on the pipe adjust the mode via its hooks, then fix up
 * the crtc config itself. Retries the encoder pass once if the crtc
 * compute step asks for a bandwidth retry.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	/* Default the transcoder to the pipe; may be overridden later. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only a single retry is allowed, to avoid looping forever. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
11345
/*
 * Check whether two clock values are "close enough": identical values
 * always match, a zero only matches another zero, and otherwise the
 * difference must be within roughly 5% (105% threshold on the scaled
 * ratio below).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Equivalent to: 100 + 100 * delta / sum < 105, i.e. ~5% slack. */
	return ((delta + sum) * 100) / sum < 105;
}
11363
11364 static bool
11365 intel_compare_m_n(unsigned int m, unsigned int n,
11366                   unsigned int m2, unsigned int n2,
11367                   bool exact)
11368 {
11369         if (m == m2 && n == n2)
11370                 return true;
11371
11372         if (exact || !m || !n || !m2 || !n2)
11373                 return false;
11374
11375         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11376
11377         if (n > n2) {
11378                 while (n > n2) {
11379                         m2 <<= 1;
11380                         n2 <<= 1;
11381                 }
11382         } else if (n < n2) {
11383                 while (n < n2) {
11384                         m <<= 1;
11385                         n <<= 1;
11386                 }
11387         }
11388
11389         if (n != n2)
11390                 return false;
11391
11392         return intel_fuzzy_clock_check(m, m2);
11393 }
11394
11395 static bool
11396 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11397                        struct intel_link_m_n *m2_n2,
11398                        bool adjust)
11399 {
11400         if (m_n->tu == m2_n2->tu &&
11401             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11402                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11403             intel_compare_m_n(m_n->link_m, m_n->link_n,
11404                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11405                 if (adjust)
11406                         *m2_n2 = *m_n;
11407
11408                 return true;
11409         }
11410
11411         return false;
11412 }
11413
/*
 * Report a pipe config mismatch in field @name. When @adjust is set the
 * caller may fix the mismatch up, so only a KMS debug message is
 * emitted; otherwise the mismatch is fatal and is logged at error level.
 * The printf-style @format describes the expected/found values.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (adjust)
		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
11431
11432 static bool
11433 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11434                           struct intel_crtc_state *current_config,
11435                           struct intel_crtc_state *pipe_config,
11436                           bool adjust)
11437 {
11438         bool ret = true;
11439         bool fixup_inherited = adjust &&
11440                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11441                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11442
11443 #define PIPE_CONF_CHECK_X(name) do { \
11444         if (current_config->name != pipe_config->name) { \
11445                 pipe_config_err(adjust, __stringify(name), \
11446                           "(expected 0x%08x, found 0x%08x)\n", \
11447                           current_config->name, \
11448                           pipe_config->name); \
11449                 ret = false; \
11450         } \
11451 } while (0)
11452
11453 #define PIPE_CONF_CHECK_I(name) do { \
11454         if (current_config->name != pipe_config->name) { \
11455                 pipe_config_err(adjust, __stringify(name), \
11456                           "(expected %i, found %i)\n", \
11457                           current_config->name, \
11458                           pipe_config->name); \
11459                 ret = false; \
11460         } \
11461 } while (0)
11462
11463 #define PIPE_CONF_CHECK_BOOL(name) do { \
11464         if (current_config->name != pipe_config->name) { \
11465                 pipe_config_err(adjust, __stringify(name), \
11466                           "(expected %s, found %s)\n", \
11467                           yesno(current_config->name), \
11468                           yesno(pipe_config->name)); \
11469                 ret = false; \
11470         } \
11471 } while (0)
11472
11473 /*
11474  * Checks state where we only read out the enabling, but not the entire
11475  * state itself (like full infoframes or ELD for audio). These states
11476  * require a full modeset on bootup to fix up.
11477  */
11478 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11479         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11480                 PIPE_CONF_CHECK_BOOL(name); \
11481         } else { \
11482                 pipe_config_err(adjust, __stringify(name), \
11483                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11484                           yesno(current_config->name), \
11485                           yesno(pipe_config->name)); \
11486                 ret = false; \
11487         } \
11488 } while (0)
11489
11490 #define PIPE_CONF_CHECK_P(name) do { \
11491         if (current_config->name != pipe_config->name) { \
11492                 pipe_config_err(adjust, __stringify(name), \
11493                           "(expected %p, found %p)\n", \
11494                           current_config->name, \
11495                           pipe_config->name); \
11496                 ret = false; \
11497         } \
11498 } while (0)
11499
11500 #define PIPE_CONF_CHECK_M_N(name) do { \
11501         if (!intel_compare_link_m_n(&current_config->name, \
11502                                     &pipe_config->name,\
11503                                     adjust)) { \
11504                 pipe_config_err(adjust, __stringify(name), \
11505                           "(expected tu %i gmch %i/%i link %i/%i, " \
11506                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11507                           current_config->name.tu, \
11508                           current_config->name.gmch_m, \
11509                           current_config->name.gmch_n, \
11510                           current_config->name.link_m, \
11511                           current_config->name.link_n, \
11512                           pipe_config->name.tu, \
11513                           pipe_config->name.gmch_m, \
11514                           pipe_config->name.gmch_n, \
11515                           pipe_config->name.link_m, \
11516                           pipe_config->name.link_n); \
11517                 ret = false; \
11518         } \
11519 } while (0)
11520
11521 /* This is required for BDW+ where there is only one set of registers for
11522  * switching between high and low RR.
11523  * This macro can be used whenever a comparison has to be made between one
11524  * hw state and multiple sw state variables.
11525  */
11526 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11527         if (!intel_compare_link_m_n(&current_config->name, \
11528                                     &pipe_config->name, adjust) && \
11529             !intel_compare_link_m_n(&current_config->alt_name, \
11530                                     &pipe_config->name, adjust)) { \
11531                 pipe_config_err(adjust, __stringify(name), \
11532                           "(expected tu %i gmch %i/%i link %i/%i, " \
11533                           "or tu %i gmch %i/%i link %i/%i, " \
11534                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11535                           current_config->name.tu, \
11536                           current_config->name.gmch_m, \
11537                           current_config->name.gmch_n, \
11538                           current_config->name.link_m, \
11539                           current_config->name.link_n, \
11540                           current_config->alt_name.tu, \
11541                           current_config->alt_name.gmch_m, \
11542                           current_config->alt_name.gmch_n, \
11543                           current_config->alt_name.link_m, \
11544                           current_config->alt_name.link_n, \
11545                           pipe_config->name.tu, \
11546                           pipe_config->name.gmch_m, \
11547                           pipe_config->name.gmch_n, \
11548                           pipe_config->name.link_m, \
11549                           pipe_config->name.link_n); \
11550                 ret = false; \
11551         } \
11552 } while (0)
11553
11554 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11555         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11556                 pipe_config_err(adjust, __stringify(name), \
11557                           "(%x) (expected %i, found %i)\n", \
11558                           (mask), \
11559                           current_config->name & (mask), \
11560                           pipe_config->name & (mask)); \
11561                 ret = false; \
11562         } \
11563 } while (0)
11564
11565 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
11566         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11567                 pipe_config_err(adjust, __stringify(name), \
11568                           "(expected %i, found %i)\n", \
11569                           current_config->name, \
11570                           pipe_config->name); \
11571                 ret = false; \
11572         } \
11573 } while (0)
11574
11575 #define PIPE_CONF_QUIRK(quirk)  \
11576         ((current_config->quirks | pipe_config->quirks) & (quirk))
11577
11578         PIPE_CONF_CHECK_I(cpu_transcoder);
11579
11580         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
11581         PIPE_CONF_CHECK_I(fdi_lanes);
11582         PIPE_CONF_CHECK_M_N(fdi_m_n);
11583
11584         PIPE_CONF_CHECK_I(lane_count);
11585         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11586
11587         if (INTEL_GEN(dev_priv) < 8) {
11588                 PIPE_CONF_CHECK_M_N(dp_m_n);
11589
11590                 if (current_config->has_drrs)
11591                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11592         } else
11593                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11594
11595         PIPE_CONF_CHECK_X(output_types);
11596
11597         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11598         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11599         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11600         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11601         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11602         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11603
11604         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11605         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11606         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11607         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11608         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11609         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11610
11611         PIPE_CONF_CHECK_I(pixel_multiplier);
11612         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11613         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11614             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11615                 PIPE_CONF_CHECK_BOOL(limited_color_range);
11616
11617         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11618         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11619         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11620         PIPE_CONF_CHECK_BOOL(ycbcr420);
11621
11622         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11623
11624         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11625                               DRM_MODE_FLAG_INTERLACE);
11626
11627         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11628                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11629                                       DRM_MODE_FLAG_PHSYNC);
11630                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11631                                       DRM_MODE_FLAG_NHSYNC);
11632                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11633                                       DRM_MODE_FLAG_PVSYNC);
11634                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11635                                       DRM_MODE_FLAG_NVSYNC);
11636         }
11637
11638         PIPE_CONF_CHECK_X(gmch_pfit.control);
11639         /* pfit ratios are autocomputed by the hw on gen4+ */
11640         if (INTEL_GEN(dev_priv) < 4)
11641                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11642         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11643
11644         if (!adjust) {
11645                 PIPE_CONF_CHECK_I(pipe_src_w);
11646                 PIPE_CONF_CHECK_I(pipe_src_h);
11647
11648                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
11649                 if (current_config->pch_pfit.enabled) {
11650                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11651                         PIPE_CONF_CHECK_X(pch_pfit.size);
11652                 }
11653
11654                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11655                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11656         }
11657
11658         PIPE_CONF_CHECK_BOOL(double_wide);
11659
11660         PIPE_CONF_CHECK_P(shared_dpll);
11661         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11662         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11663         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11664         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11665         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11666         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11667         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11668         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11669         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11670         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11671         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11672         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11673         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11674         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11675         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11676         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11677         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11678         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11679         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11680         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11681         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
11682         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11683         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11684         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11685         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11686         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11687         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11688         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11689         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11690         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11691         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
11692
11693         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11694         PIPE_CONF_CHECK_X(dsi_pll.div);
11695
11696         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11697                 PIPE_CONF_CHECK_I(pipe_bpp);
11698
11699         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11700         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11701
11702         PIPE_CONF_CHECK_I(min_voltage_level);
11703
11704 #undef PIPE_CONF_CHECK_X
11705 #undef PIPE_CONF_CHECK_I
11706 #undef PIPE_CONF_CHECK_BOOL
11707 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
11708 #undef PIPE_CONF_CHECK_P
11709 #undef PIPE_CONF_CHECK_FLAGS
11710 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11711 #undef PIPE_CONF_QUIRK
11712
11713         return ret;
11714 }
11715
11716 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11717                                            const struct intel_crtc_state *pipe_config)
11718 {
11719         if (pipe_config->has_pch_encoder) {
11720                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11721                                                             &pipe_config->fdi_m_n);
11722                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11723
11724                 /*
11725                  * FDI already provided one idea for the dotclock.
11726                  * Yell if the encoder disagrees.
11727                  */
11728                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11729                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11730                      fdi_dotclock, dotclock);
11731         }
11732 }
11733
11734 static void verify_wm_state(struct drm_crtc *crtc,
11735                             struct drm_crtc_state *new_state)
11736 {
11737         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11738         struct skl_ddb_allocation hw_ddb, *sw_ddb;
11739         struct skl_pipe_wm hw_wm, *sw_wm;
11740         struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
11741         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
11742         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11743         const enum pipe pipe = intel_crtc->pipe;
11744         int plane, level, max_level = ilk_wm_max_level(dev_priv);
11745
11746         if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
11747                 return;
11748
11749         skl_pipe_wm_get_hw_state(crtc, &hw_wm);
11750         sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
11751
11752         skl_ddb_get_hw_state(dev_priv, &hw_ddb);
11753         sw_ddb = &dev_priv->wm.skl_hw.ddb;
11754
11755         if (INTEL_GEN(dev_priv) >= 11)
11756                 if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
11757                         DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
11758                                   sw_ddb->enabled_slices,
11759                                   hw_ddb.enabled_slices);
11760         /* planes */
11761         for_each_universal_plane(dev_priv, pipe, plane) {
11762                 hw_plane_wm = &hw_wm.planes[plane];
11763                 sw_plane_wm = &sw_wm->planes[plane];
11764
11765                 /* Watermarks */
11766                 for (level = 0; level <= max_level; level++) {
11767                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11768                                                 &sw_plane_wm->wm[level]))
11769                                 continue;
11770
11771                         DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11772                                   pipe_name(pipe), plane + 1, level,
11773                                   sw_plane_wm->wm[level].plane_en,
11774                                   sw_plane_wm->wm[level].plane_res_b,
11775                                   sw_plane_wm->wm[level].plane_res_l,
11776                                   hw_plane_wm->wm[level].plane_en,
11777                                   hw_plane_wm->wm[level].plane_res_b,
11778                                   hw_plane_wm->wm[level].plane_res_l);
11779                 }
11780
11781                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11782                                          &sw_plane_wm->trans_wm)) {
11783                         DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11784                                   pipe_name(pipe), plane + 1,
11785                                   sw_plane_wm->trans_wm.plane_en,
11786                                   sw_plane_wm->trans_wm.plane_res_b,
11787                                   sw_plane_wm->trans_wm.plane_res_l,
11788                                   hw_plane_wm->trans_wm.plane_en,
11789                                   hw_plane_wm->trans_wm.plane_res_b,
11790                                   hw_plane_wm->trans_wm.plane_res_l);
11791                 }
11792
11793                 /* DDB */
11794                 hw_ddb_entry = &hw_ddb.plane[pipe][plane];
11795                 sw_ddb_entry = &sw_ddb->plane[pipe][plane];
11796
11797                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11798                         DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
11799                                   pipe_name(pipe), plane + 1,
11800                                   sw_ddb_entry->start, sw_ddb_entry->end,
11801                                   hw_ddb_entry->start, hw_ddb_entry->end);
11802                 }
11803         }
11804
11805         /*
11806          * cursor
11807          * If the cursor plane isn't active, we may not have updated it's ddb
11808          * allocation. In that case since the ddb allocation will be updated
11809          * once the plane becomes visible, we can skip this check
11810          */
11811         if (1) {
11812                 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
11813                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
11814
11815                 /* Watermarks */
11816                 for (level = 0; level <= max_level; level++) {
11817                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11818                                                 &sw_plane_wm->wm[level]))
11819                                 continue;
11820
11821                         DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11822                                   pipe_name(pipe), level,
11823                                   sw_plane_wm->wm[level].plane_en,
11824                                   sw_plane_wm->wm[level].plane_res_b,
11825                                   sw_plane_wm->wm[level].plane_res_l,
11826                                   hw_plane_wm->wm[level].plane_en,
11827                                   hw_plane_wm->wm[level].plane_res_b,
11828                                   hw_plane_wm->wm[level].plane_res_l);
11829                 }
11830
11831                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11832                                          &sw_plane_wm->trans_wm)) {
11833                         DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11834                                   pipe_name(pipe),
11835                                   sw_plane_wm->trans_wm.plane_en,
11836                                   sw_plane_wm->trans_wm.plane_res_b,
11837                                   sw_plane_wm->trans_wm.plane_res_l,
11838                                   hw_plane_wm->trans_wm.plane_en,
11839                                   hw_plane_wm->trans_wm.plane_res_b,
11840                                   hw_plane_wm->trans_wm.plane_res_l);
11841                 }
11842
11843                 /* DDB */
11844                 hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
11845                 sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
11846
11847                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11848                         DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
11849                                   pipe_name(pipe),
11850                                   sw_ddb_entry->start, sw_ddb_entry->end,
11851                                   hw_ddb_entry->start, hw_ddb_entry->end);
11852                 }
11853         }
11854 }
11855
11856 static void
11857 verify_connector_state(struct drm_device *dev,
11858                        struct drm_atomic_state *state,
11859                        struct drm_crtc *crtc)
11860 {
11861         struct drm_connector *connector;
11862         struct drm_connector_state *new_conn_state;
11863         int i;
11864
11865         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11866                 struct drm_encoder *encoder = connector->encoder;
11867                 struct drm_crtc_state *crtc_state = NULL;
11868
11869                 if (new_conn_state->crtc != crtc)
11870                         continue;
11871
11872                 if (crtc)
11873                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11874
11875                 intel_connector_verify_state(crtc_state, new_conn_state);
11876
11877                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11878                      "connector's atomic encoder doesn't match legacy encoder\n");
11879         }
11880 }
11881
11882 static void
11883 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
11884 {
11885         struct intel_encoder *encoder;
11886         struct drm_connector *connector;
11887         struct drm_connector_state *old_conn_state, *new_conn_state;
11888         int i;
11889
11890         for_each_intel_encoder(dev, encoder) {
11891                 bool enabled = false, found = false;
11892                 enum pipe pipe;
11893
11894                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
11895                               encoder->base.base.id,
11896                               encoder->base.name);
11897
11898                 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
11899                                                    new_conn_state, i) {
11900                         if (old_conn_state->best_encoder == &encoder->base)
11901                                 found = true;
11902
11903                         if (new_conn_state->best_encoder != &encoder->base)
11904                                 continue;
11905                         found = enabled = true;
11906
11907                         I915_STATE_WARN(new_conn_state->crtc !=
11908                                         encoder->base.crtc,
11909                              "connector's crtc doesn't match encoder crtc\n");
11910                 }
11911
11912                 if (!found)
11913                         continue;
11914
11915                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
11916                      "encoder's enabled state mismatch "
11917                      "(expected %i, found %i)\n",
11918                      !!encoder->base.crtc, enabled);
11919
11920                 if (!encoder->base.crtc) {
11921                         bool active;
11922
11923                         active = encoder->get_hw_state(encoder, &pipe);
11924                         I915_STATE_WARN(active,
11925                              "encoder detached but still enabled on pipe %c.\n",
11926                              pipe_name(pipe));
11927                 }
11928         }
11929 }
11930
/*
 * Verify that the hardware state of @crtc matches the committed software
 * state. Reads the pipe config back from the hardware into scratch storage
 * and compares it against the new crtc state.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *old_crtc_state,
                  struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
        struct drm_atomic_state *old_state;
        bool active;

        /*
         * The old crtc state is no longer needed after the commit, so its
         * memory is destroyed and reused in place as scratch space for the
         * hardware readout. Only the crtc and state back-pointers survive.
         */
        old_state = old_crtc_state->state;
        __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
        pipe_config = to_intel_crtc_state(old_crtc_state);
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = crtc;
        pipe_config->base.state = old_state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->active;

        I915_STATE_WARN(new_crtc_state->active != active,
             "crtc active state doesn't match with hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, active);

        I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
             "transitional active state does not match atomic hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

        /* Every encoder on the crtc must agree on active state and pipe. */
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->active,
                        "[ENCODER:%i] active %i with crtc active %i\n",
                        encoder->base.base.id, active, new_crtc_state->active);

                I915_STATE_WARN(active && intel_crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let active encoders fill in their part of the hw state. */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* The full config comparison only makes sense for an active crtc. */
        if (!new_crtc_state->active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        sw_config = to_intel_crtc_state(new_crtc_state);
        if (!intel_pipe_config_compare(dev_priv, sw_config,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(intel_crtc, pipe_config,
                                       "[hw state]");
                intel_dump_pipe_config(intel_crtc, sw_config,
                                       "[sw state]");
        }
}
12000
12001 static void
12002 intel_verify_planes(struct intel_atomic_state *state)
12003 {
12004         struct intel_plane *plane;
12005         const struct intel_plane_state *plane_state;
12006         int i;
12007
12008         for_each_new_intel_plane_in_state(state, plane,
12009                                           plane_state, i)
12010                 assert_plane(plane, plane_state->base.visible);
12011 }
12012
12013 static void
12014 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12015                          struct intel_shared_dpll *pll,
12016                          struct drm_crtc *crtc,
12017                          struct drm_crtc_state *new_state)
12018 {
12019         struct intel_dpll_hw_state dpll_hw_state;
12020         unsigned int crtc_mask;
12021         bool active;
12022
12023         memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12024
12025         DRM_DEBUG_KMS("%s\n", pll->info->name);
12026
12027         active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12028
12029         if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12030                 I915_STATE_WARN(!pll->on && pll->active_mask,
12031                      "pll in active use but not on in sw tracking\n");
12032                 I915_STATE_WARN(pll->on && !pll->active_mask,
12033                      "pll is on but not used by any active crtc\n");
12034                 I915_STATE_WARN(pll->on != active,
12035                      "pll on state mismatch (expected %i, found %i)\n",
12036                      pll->on, active);
12037         }
12038
12039         if (!crtc) {
12040                 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12041                                 "more active pll users than references: %x vs %x\n",
12042                                 pll->active_mask, pll->state.crtc_mask);
12043
12044                 return;
12045         }
12046
12047         crtc_mask = drm_crtc_mask(crtc);
12048
12049         if (new_state->active)
12050                 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12051                                 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12052                                 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12053         else
12054                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12055                                 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12056                                 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12057
12058         I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12059                         "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12060                         crtc_mask, pll->state.crtc_mask);
12061
12062         I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12063                                           &dpll_hw_state,
12064                                           sizeof(dpll_hw_state)),
12065                         "pll hw state mismatch\n");
12066 }
12067
12068 static void
12069 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12070                          struct drm_crtc_state *old_crtc_state,
12071                          struct drm_crtc_state *new_crtc_state)
12072 {
12073         struct drm_i915_private *dev_priv = to_i915(dev);
12074         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12075         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12076
12077         if (new_state->shared_dpll)
12078                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12079
12080         if (old_state->shared_dpll &&
12081             old_state->shared_dpll != new_state->shared_dpll) {
12082                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12083                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12084
12085                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12086                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12087                                 pipe_name(drm_crtc_index(crtc)));
12088                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12089                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12090                                 pipe_name(drm_crtc_index(crtc)));
12091         }
12092 }
12093
12094 static void
12095 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12096                           struct drm_atomic_state *state,
12097                           struct drm_crtc_state *old_state,
12098                           struct drm_crtc_state *new_state)
12099 {
12100         if (!needs_modeset(new_state) &&
12101             !to_intel_crtc_state(new_state)->update_pipe)
12102                 return;
12103
12104         verify_wm_state(crtc, new_state);
12105         verify_connector_state(crtc->dev, state, crtc);
12106         verify_crtc_state(crtc, old_state, new_state);
12107         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12108 }
12109
12110 static void
12111 verify_disabled_dpll_state(struct drm_device *dev)
12112 {
12113         struct drm_i915_private *dev_priv = to_i915(dev);
12114         int i;
12115
12116         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12117                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12118 }
12119
/* Verify the state of everything not tied to an enabled crtc. */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        verify_encoder_state(dev, state);
        verify_connector_state(dev, state, NULL);
        verify_disabled_dpll_state(dev);
}
12128
12129 static void update_scanline_offset(struct intel_crtc *crtc)
12130 {
12131         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12132
12133         /*
12134          * The scanline counter increments at the leading edge of hsync.
12135          *
12136          * On most platforms it starts counting from vtotal-1 on the
12137          * first active line. That means the scanline counter value is
12138          * always one less than what we would expect. Ie. just after
12139          * start of vblank, which also occurs at start of hsync (on the
12140          * last active line), the scanline counter will read vblank_start-1.
12141          *
12142          * On gen2 the scanline counter starts counting from 1 instead
12143          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12144          * to keep the value positive), instead of adding one.
12145          *
12146          * On HSW+ the behaviour of the scanline counter depends on the output
12147          * type. For DP ports it behaves like most other platforms, but on HDMI
12148          * there's an extra 1 line difference. So we need to add two instead of
12149          * one to the value.
12150          *
12151          * On VLV/CHV DSI the scanline counter would appear to increment
12152          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12153          * that means we can't tell whether we're in vblank or not while
12154          * we're on that particular line. We must still set scanline_offset
12155          * to 1 so that the vblank timestamps come out correct when we query
12156          * the scanline counter from within the vblank interrupt handler.
12157          * However if queried just before the start of vblank we'll get an
12158          * answer that's slightly in the future.
12159          */
12160         if (IS_GEN2(dev_priv)) {
12161                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12162                 int vtotal;
12163
12164                 vtotal = adjusted_mode->crtc_vtotal;
12165                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12166                         vtotal /= 2;
12167
12168                 crtc->scanline_offset = vtotal - 1;
12169         } else if (HAS_DDI(dev_priv) &&
12170                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12171                 crtc->scanline_offset = 2;
12172         } else
12173                 crtc->scanline_offset = 1;
12174 }
12175
12176 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12177 {
12178         struct drm_device *dev = state->dev;
12179         struct drm_i915_private *dev_priv = to_i915(dev);
12180         struct drm_crtc *crtc;
12181         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12182         int i;
12183
12184         if (!dev_priv->display.crtc_compute_clock)
12185                 return;
12186
12187         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12188                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12189                 struct intel_shared_dpll *old_dpll =
12190                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12191
12192                 if (!needs_modeset(new_crtc_state))
12193                         continue;
12194
12195                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12196
12197                 if (!old_dpll)
12198                         continue;
12199
12200                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12201         }
12202 }
12203
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Records the pipe to wait on in hsw_workaround_pipe of the affected CRTC
 * state. Returns 0 on success or a negative error code if acquiring a CRTC
 * state fails.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct intel_crtc *intel_crtc;
        struct drm_crtc *crtc;
        struct intel_crtc_state *first_crtc_state = NULL;
        struct intel_crtc_state *other_crtc_state = NULL;
        enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
        int i;

        /* look at all crtc's that are going to be enabled during the modeset */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                intel_crtc = to_intel_crtc(crtc);

                if (!crtc_state->active || !needs_modeset(crtc_state))
                        continue;

                /* Remember the first two newly enabled CRTCs only. */
                if (first_crtc_state) {
                        other_crtc_state = to_intel_crtc_state(crtc_state);
                        break;
                } else {
                        first_crtc_state = to_intel_crtc_state(crtc_state);
                        first_pipe = intel_crtc->pipe;
                }
        }

        /* No workaround needed? */
        if (!first_crtc_state)
                return 0;

        /* w/a possibly needed, check how many crtc's are already enabled. */
        for_each_intel_crtc(state->dev, intel_crtc) {
                struct intel_crtc_state *pipe_config;

                pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
                if (IS_ERR(pipe_config))
                        return PTR_ERR(pipe_config);

                pipe_config->hsw_workaround_pipe = INVALID_PIPE;

                /* Only count CRTCs that stay enabled untouched. */
                if (!pipe_config->base.active ||
                    needs_modeset(&pipe_config->base))
                        continue;

                /* 2 or more enabled crtcs means no need for w/a */
                if (enabled_pipe != INVALID_PIPE)
                        return 0;

                enabled_pipe = intel_crtc->pipe;
        }

        /*
         * Exactly one pipe stays enabled: the first new pipe waits on it.
         * Zero stay enabled: the second new pipe waits on the first one.
         */
        if (enabled_pipe != INVALID_PIPE)
                first_crtc_state->hsw_workaround_pipe = enabled_pipe;
        else if (other_crtc_state)
                other_crtc_state->hsw_workaround_pipe = first_pipe;

        return 0;
}
12268
12269 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12270 {
12271         struct drm_crtc *crtc;
12272
12273         /* Add all pipes to the state */
12274         for_each_crtc(state->dev, crtc) {
12275                 struct drm_crtc_state *crtc_state;
12276
12277                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12278                 if (IS_ERR(crtc_state))
12279                         return PTR_ERR(crtc_state);
12280         }
12281
12282         return 0;
12283 }
12284
12285 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12286 {
12287         struct drm_crtc *crtc;
12288
12289         /*
12290          * Add all pipes to the state, and force
12291          * a modeset on all the active ones.
12292          */
12293         for_each_crtc(state->dev, crtc) {
12294                 struct drm_crtc_state *crtc_state;
12295                 int ret;
12296
12297                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12298                 if (IS_ERR(crtc_state))
12299                         return PTR_ERR(crtc_state);
12300
12301                 if (!crtc_state->active || needs_modeset(crtc_state))
12302                         continue;
12303
12304                 crtc_state->mode_changed = true;
12305
12306                 ret = drm_atomic_add_affected_connectors(state, crtc);
12307                 if (ret)
12308                         return ret;
12309
12310                 ret = drm_atomic_add_affected_planes(state, crtc);
12311                 if (ret)
12312                         return ret;
12313         }
12314
12315         return 0;
12316 }
12317
/*
 * Perform the global (non-per-CRTC) checks needed when at least one CRTC
 * undergoes a full modeset: digital port conflicts, the active-CRTC mask,
 * cdclk recomputation, shared DPLL release and the HSW plane workaround.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int ret = 0, i;

        if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return -EINVAL;
        }

        /* Start from the current hardware bookkeeping and update it below. */
        intel_state->modeset = true;
        intel_state->active_crtcs = dev_priv->active_crtcs;
        intel_state->cdclk.logical = dev_priv->cdclk.logical;
        intel_state->cdclk.actual = dev_priv->cdclk.actual;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                /*
                 * NOTE(review): '1 << i' relies on the iterator index
                 * matching drm_crtc_index(crtc) — confirm.
                 */
                if (new_crtc_state->active)
                        intel_state->active_crtcs |= 1 << i;
                else
                        intel_state->active_crtcs &= ~(1 << i);

                if (old_crtc_state->active != new_crtc_state->active)
                        intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
        }

        /*
         * See if the config requires any additional preparation, e.g.
         * to adjust global state with pipes off.  We need to do this
         * here so we can get the modeset_pipe updated config for the new
         * mode set on this crtc.  For other crtcs we need to use the
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
                ret = dev_priv->display.modeset_calc_cdclk(state);
                if (ret < 0)
                        return ret;

                /*
                 * Writes to dev_priv->cdclk.logical must be protected by
                 * holding all the crtc locks, even if we don't end up
                 * touching the hardware
                 */
                if (intel_cdclk_changed(&dev_priv->cdclk.logical,
                                        &intel_state->cdclk.logical)) {
                        ret = intel_lock_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                /* All pipes must be switched off while we change the cdclk. */
                if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
                                              &intel_state->cdclk.actual)) {
                        ret = intel_modeset_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
                              intel_state->cdclk.logical.cdclk,
                              intel_state->cdclk.actual.cdclk);
                DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
                              intel_state->cdclk.logical.voltage_level,
                              intel_state->cdclk.actual.voltage_level);
        } else {
                /* No cdclk hook: carry over the current logical cdclk. */
                to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
        }

        intel_modeset_clear_plls(state);

        if (IS_HASWELL(dev_priv))
                return haswell_mode_set_planes_workaround(state);

        return 0;
}
12395
12396 /*
12397  * Handle calculation of various watermark data at the end of the atomic check
12398  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12399  * handlers to ensure that all derived state has been updated.
12400  */
12401 static int calc_watermark_data(struct drm_atomic_state *state)
12402 {
12403         struct drm_device *dev = state->dev;
12404         struct drm_i915_private *dev_priv = to_i915(dev);
12405
12406         /* Is there platform-specific watermark information to calculate? */
12407         if (dev_priv->display.compute_global_watermarks)
12408                 return dev_priv->display.compute_global_watermarks(state);
12409
12410         return 0;
12411 }
12412
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's atomic_check hook: runs the core modeset checks, computes the
 * i915 pipe config for every CRTC that needs a modeset (possibly
 * downgrading it to a fastset), and finishes with plane, FBC and
 * watermark checks.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int intel_atomic_check(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *crtc_state;
        int ret, i;
        bool any_ms = false;

        /* Catch I915_MODE_FLAG_INHERITED: force a modeset if it changed. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      crtc_state, i) {
                if (crtc_state->mode.private_flags !=
                    old_crtc_state->mode.private_flags)
                        crtc_state->mode_changed = true;
        }

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
                struct intel_crtc_state *pipe_config =
                        to_intel_crtc_state(crtc_state);

                if (!needs_modeset(crtc_state))
                        continue;

                /* A disable is always a full modeset, nothing to compute. */
                if (!crtc_state->enable) {
                        any_ms = true;
                        continue;
                }

                ret = intel_modeset_pipe_config(crtc, pipe_config);
                if (ret) {
                        intel_dump_pipe_config(to_intel_crtc(crtc),
                                               pipe_config, "[failed]");
                        return ret;
                }

                /*
                 * With fastboot, downgrade to a fastset when the computed
                 * config matches the old one closely enough.
                 */
                if (i915_modparams.fastboot &&
                    intel_pipe_config_compare(dev_priv,
                                        to_intel_crtc_state(old_crtc_state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
                        pipe_config->update_pipe = true;
                }

                if (needs_modeset(crtc_state))
                        any_ms = true;

                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       needs_modeset(crtc_state) ?
                                       "[modeset]" : "[fastset]");
        }

        if (any_ms) {
                ret = intel_modeset_checks(state);

                if (ret)
                        return ret;
        } else {
                /* No modeset anywhere: just carry over the logical cdclk. */
                intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }

        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;

        intel_fbc_choose_crtc(dev_priv, intel_state);
        return calc_watermark_data(state);
}
12491
/*
 * Prepare the commit before it is queued/run; currently all plane
 * preparation is delegated to the DRM atomic helper.
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state)
{
        return drm_atomic_helper_prepare_planes(dev, state);
}
12497
12498 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12499 {
12500         struct drm_device *dev = crtc->base.dev;
12501
12502         if (!dev->max_vblank_count)
12503                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12504
12505         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12506 }
12507
/*
 * Commit the new state on a single CRTC: full enable for a modeset, or
 * pre-plane fastset updates otherwise, followed by FBC and plane updates
 * bracketed by begin/finish_crtc_commit.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
                              struct drm_atomic_state *state,
                              struct drm_crtc_state *old_crtc_state,
                              struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
        bool modeset = needs_modeset(new_crtc_state);
        /* New primary-plane state, if the primary is part of this commit. */
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
                                                 to_intel_plane(crtc->primary));

        if (modeset) {
                update_scanline_offset(intel_crtc);
                dev_priv->display.crtc_enable(pipe_config, state);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(intel_crtc);
        } else {
                /* Fastset: no full enable, just the pre-plane updates. */
                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       pipe_config);
        }

        if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

        intel_begin_crtc_commit(crtc, old_crtc_state);

        intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
                                    old_intel_cstate, pipe_config);

        intel_finish_crtc_commit(crtc, old_crtc_state);
}
12544
12545 static void intel_update_crtcs(struct drm_atomic_state *state)
12546 {
12547         struct drm_crtc *crtc;
12548         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12549         int i;
12550
12551         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12552                 if (!new_crtc_state->active)
12553                         continue;
12554
12555                 intel_update_crtc(crtc, state, old_crtc_state,
12556                                   new_crtc_state);
12557         }
12558 }
12559
/*
 * SKL+ update_crtcs implementation: commits CRTCs in an order that keeps
 * their DDB allocations from ever overlapping in between updates, and
 * manages the second DBuf slice on ICL+.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *cstate;
        unsigned int updated = 0;
        bool progress;
        enum pipe pipe;
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

        /* Current (pre-update) DDB entry for each pipe still on the old ddb. */
        const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->active)
                        entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other in between CRTC updates. Otherwise
         * we'll cause pipe underruns and other bad stuff.
         *
         * Repeatedly sweep the CRTCs, committing each one whose new DDB no
         * longer overlaps any not-yet-updated pipe, until all are done.
         */
        do {
                progress = false;

                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        bool vbl_wait = false;
                        unsigned int cmask = drm_crtc_mask(crtc);

                        intel_crtc = to_intel_crtc(crtc);
                        cstate = to_intel_crtc_state(new_crtc_state);
                        pipe = intel_crtc->pipe;

                        if (updated & cmask || !cstate->base.active)
                                continue;

                        /* Still conflicts with a pending pipe: try later. */
                        if (skl_ddb_allocation_overlaps(dev_priv,
                                                        entries,
                                                        &cstate->wm.skl.ddb,
                                                        i))
                                continue;

                        updated |= cmask;
                        entries[i] = &cstate->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, its DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
                                                 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
                            !new_crtc_state->active_changed &&
                            intel_state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);

        /* If 2nd DBuf slice is no longer required, disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
12641
/*
 * Drain the deferred-free list: atomically grab all queued atomic states
 * and drop the final reference on each.
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
        struct intel_atomic_state *state, *next;
        struct llist_node *freed;

        freed = llist_del_all(&dev_priv->atomic_helper.free_list);
        llist_for_each_entry_safe(state, next, freed, freed)
                drm_atomic_state_put(&state->base);
}
12651
12652 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12653 {
12654         struct drm_i915_private *dev_priv =
12655                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12656
12657         intel_atomic_helper_free_state(dev_priv);
12658 }
12659
/*
 * Wait (uninterruptibly) for the commit-ready fence to signal, bailing
 * out early if a GPU-reset modeset is flagged. Open-coded because we
 * must sleep on two wait queues (the fence's and gpu_error's) at once.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Re-arm both waits before re-checking the conditions. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(&dev_priv->gpu_error.wait_queue,
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready)
                    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12683
/*
 * Deferred tail of a commit, run from the high-priority workqueue:
 * release plane resources, signal cleanup_done, drop our state
 * reference, then reap any states queued for deferred freeing.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
        struct drm_atomic_state *state =
                container_of(work, struct drm_atomic_state, commit_work);
        struct drm_i915_private *i915 = to_i915(state->dev);

        drm_atomic_helper_cleanup_planes(&i915->drm, state);
        drm_atomic_helper_commit_cleanup_done(state);
        drm_atomic_state_put(state);

        intel_atomic_helper_free_state(i915);
}
12696
/*
 * The main commit sequence: waits for dependencies, disables the outgoing
 * CRTCs, reprograms global state (cdclk, SAGV), enables the incoming
 * CRTCs, and finishes with watermark optimization, verification and
 * deferred cleanup. Runs either inline (blocking commits) or from the
 * commit worker (nonblocking commits).
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        int i;

        intel_atomic_commit_fence_wait(intel_state);

        drm_atomic_helper_wait_for_dependencies(state);

        if (intel_state->modeset)
                intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Disable phase: take power domains and tear down outgoing pipes. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
                new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
                intel_crtc = to_intel_crtc(crtc);

                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {

                        put_domains[intel_crtc->pipe] =
                                modeset_get_crtc_power_domains(crtc,
                                        new_intel_crtc_state);
                }

                if (!needs_modeset(new_crtc_state))
                        continue;

                intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

                if (old_crtc_state->active) {
                        intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);

                        /*
                         * We need to disable pipe CRC before disabling the pipe,
                         * or we race against vblank off.
                         */
                        intel_crtc_disable_pipe_crc(intel_crtc);

                        dev_priv->display.crtc_disable(old_intel_crtc_state, state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(intel_crtc);

                        /*
                         * Underruns don't always raise
                         * interrupts, so check manually.
                         */
                        intel_check_cpu_fifo_underruns(dev_priv);
                        intel_check_pch_fifo_underruns(dev_priv);

                        if (!new_crtc_state->active) {
                                /*
                                 * Make sure we don't call initial_watermarks
                                 * for ILK-style watermark updates.
                                 *
                                 * No clue what this is supposed to achieve.
                                 */
                                if (INTEL_GEN(dev_priv) >= 9)
                                        dev_priv->display.initial_watermarks(intel_state,
                                                                             new_intel_crtc_state);
                        }
                }
        }

        /* FIXME: Eventually get rid of our intel_crtc->config pointer */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

                intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more than one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disabled pipes here. */
                if (modeset && !new_crtc_state->active && new_crtc_state->event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->event = NULL;
                }
        }

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.update_crtcs(state);

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchronously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need our special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, state);

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(intel_state,
                                                              new_intel_crtc_state);
        }

        /* Post-plane cleanup, power-domain release and per-CRTC verification. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        if (intel_state->modeset)
                intel_verify_planes(intel_state);

        if (intel_state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(state);

        if (intel_state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
        }

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->commit_work);
}
12870
12871 static void intel_atomic_commit_work(struct work_struct *work)
12872 {
12873         struct drm_atomic_state *state =
12874                 container_of(work, struct drm_atomic_state, commit_work);
12875
12876         intel_atomic_commit_tail(state);
12877 }
12878
12879 static int __i915_sw_fence_call
12880 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12881                           enum i915_sw_fence_notify notify)
12882 {
12883         struct intel_atomic_state *state =
12884                 container_of(fence, struct intel_atomic_state, commit_ready);
12885
12886         switch (notify) {
12887         case FENCE_COMPLETE:
12888                 /* we do blocking waits in the worker, nothing to do here */
12889                 break;
12890         case FENCE_FREE:
12891                 {
12892                         struct intel_atomic_helper *helper =
12893                                 &to_i915(state->base.dev)->atomic_helper;
12894
12895                         if (llist_add(&state->freed, &helper->free_list))
12896                                 schedule_work(&helper->free_work);
12897                         break;
12898                 }
12899         }
12900
12901         return NOTIFY_DONE;
12902 }
12903
12904 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12905 {
12906         struct drm_plane_state *old_plane_state, *new_plane_state;
12907         struct drm_plane *plane;
12908         int i;
12909
12910         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
12911                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
12912                                   intel_fb_obj(new_plane_state->fb),
12913                                   to_intel_plane(plane)->frontbuffer_bit);
12914 }
12915
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Reference held until intel_atomic_commit_tail() (via the cleanup
	 * worker) is done with the state. */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		/* Committing the fence triggers FENCE_FREE, which releases
		 * the state through the atomic_helper free worker. */
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(state, true);

	if (ret) {
		i915_sw_fence_commit(&intel_state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}
	/* State is swapped in from here on; publish the software tracking
	 * that must now match the committed configuration. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
		       sizeof(intel_state->min_cdclk));
		memcpy(dev_priv->min_voltage_level,
		       intel_state->min_voltage_level,
		       sizeof(intel_state->min_voltage_level));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	/* Second reference, owned by the commit work itself. */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (nonblock && intel_state->modeset) {
		/* Nonblocking modesets are serialized on a dedicated wq. */
		queue_work(dev_priv->modeset_wq, &state->commit_work);
	} else if (nonblock) {
		queue_work(system_unbound_wq, &state->commit_work);
	} else {
		/* Blocking commit: flush any in-flight nonblocking modesets
		 * first, then run the commit tail inline. */
		if (intel_state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
13016
/* CRTC vfuncs: the atomic helpers provide set_config/page_flip/gamma;
 * i915 supplies state duplication/destruction and the CRC source hooks. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
13028
/*
 * One-shot vblank waitqueue entry used to boost the GPU frequency (RPS)
 * when a flip's rendering risks missing its vblank; armed by
 * add_rps_boost_after_vblank() and fired/freed by do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	/* crtc whose vblank waitqueue we are queued on */
	struct drm_crtc *crtc;
	/* request to boost if it has not started executing by vblank */
	struct i915_request *request;
};
13035
13036 static int do_rps_boost(struct wait_queue_entry *_wait,
13037                         unsigned mode, int sync, void *key)
13038 {
13039         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13040         struct i915_request *rq = wait->request;
13041
13042         /*
13043          * If we missed the vblank, but the request is already running it
13044          * is reasonable to assume that it will complete before the next
13045          * vblank without our intervention, so leave RPS alone.
13046          */
13047         if (!i915_request_started(rq))
13048                 gen6_rps_boost(rq, NULL);
13049         i915_request_put(rq);
13050
13051         drm_crtc_vblank_put(wait->crtc);
13052
13053         list_del(&wait->wait.entry);
13054         kfree(wait);
13055         return 1;
13056 }
13057
13058 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13059                                        struct dma_fence *fence)
13060 {
13061         struct wait_rps_boost *wait;
13062
13063         if (!dma_fence_is_i915(fence))
13064                 return;
13065
13066         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13067                 return;
13068
13069         if (drm_crtc_vblank_get(crtc))
13070                 return;
13071
13072         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13073         if (!wait) {
13074                 drm_crtc_vblank_put(crtc);
13075                 return;
13076         }
13077
13078         wait->request = to_request(dma_fence_get(fence));
13079         wait->crtc = crtc;
13080
13081         wait->wait.func = do_rps_boost;
13082         wait->wait.flags = 0;
13083
13084         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13085 }
13086
13087 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13088 {
13089         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13090         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13091         struct drm_framebuffer *fb = plane_state->base.fb;
13092         struct i915_vma *vma;
13093
13094         if (plane->id == PLANE_CURSOR &&
13095             INTEL_INFO(dev_priv)->cursor_needs_physical) {
13096                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13097                 const int align = intel_cursor_alignment(dev_priv);
13098                 int err;
13099
13100                 err = i915_gem_object_attach_phys(obj, align);
13101                 if (err)
13102                         return err;
13103         }
13104
13105         vma = intel_pin_and_fence_fb_obj(fb,
13106                                          &plane_state->view,
13107                                          intel_plane_uses_fence(plane_state),
13108                                          &plane_state->flags);
13109         if (IS_ERR(vma))
13110                 return PTR_ERR(vma);
13111
13112         plane_state->vma = vma;
13113
13114         return 0;
13115 }
13116
13117 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13118 {
13119         struct i915_vma *vma;
13120
13121         vma = fetch_and_zero(&old_plane_state->vma);
13122         if (vma)
13123                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13124 }
13125
/* Raise the scheduling priority of all rendering into @obj so that the
 * framebuffer contents are ready in time for the flip. */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13134
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Takes struct_mutex internally (mutex_lock_interruptible() below), so it
 * must not already be held by the caller.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(new_state->state,
						      plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	/* Keep the backing pages resident while pinning the vma below. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	mutex_unlock(&dev_priv->drm.struct_mutex);
	/* The successful pin holds its own reference; drop ours. */
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		/* Defer the flip (via the commit_ready fence) until any
		 * outstanding rendering to the new fb has completed. */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
13253
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Takes struct_mutex internally, so it must not already be held by the
 * caller.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Undo the interactive RPS bias set in intel_prepare_plane_fb(). */
	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
13281
13282 int
13283 skl_max_scale(const struct intel_crtc_state *crtc_state,
13284               u32 pixel_format)
13285 {
13286         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13287         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13288         int max_scale, mult;
13289         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13290
13291         if (!crtc_state->base.enable)
13292                 return DRM_PLANE_HELPER_NO_SCALING;
13293
13294         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13295         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13296
13297         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13298                 max_dotclk *= 2;
13299
13300         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13301                 return DRM_PLANE_HELPER_NO_SCALING;
13302
13303         /*
13304          * skl max scale is lower of:
13305          *    close to 3 but not 3, -1 is for that purpose
13306          *            or
13307          *    cdclk/crtc_clock
13308          */
13309         mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13310         tmpclk1 = (1 << 16) * mult - 1;
13311         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13312         max_scale = min(tmpclk1, tmpclk2);
13313
13314         return max_scale;
13315 }
13316
/*
 * Per-crtc hook run before the plane updates of a commit: reloads color
 * management state for fastsets, opens the vblank evasion critical section
 * and programs non-modeset pipe config plus watermarks. Paired with
 * intel_finish_crtc_commit(), which closes the evasion window.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* For fastsets and color changes, program CSC/LUTs here; full
	 * modesets load them elsewhere in the enable sequence. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	if (modeset)
		goto out;

	/* NOTE(review): skl_detach_scalers() presumably frees scalers no
	 * longer claimed by the new state — confirm against its definition. */
	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	/* Watermarks are programmed for modesets and fastsets alike. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13354
13355 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13356                                   struct intel_crtc_state *crtc_state)
13357 {
13358         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13359
13360         if (!IS_GEN2(dev_priv))
13361                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13362
13363         if (crtc_state->has_pch_encoder) {
13364                 enum pipe pch_transcoder =
13365                         intel_crtc_pch_transcoder(crtc);
13366
13367                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13368         }
13369 }
13370
13371 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13372                                      struct drm_crtc_state *old_crtc_state)
13373 {
13374         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13375         struct intel_atomic_state *old_intel_state =
13376                 to_intel_atomic_state(old_crtc_state->state);
13377         struct intel_crtc_state *new_crtc_state =
13378                 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13379
13380         intel_pipe_update_end(new_crtc_state);
13381
13382         if (new_crtc_state->update_pipe &&
13383             !needs_modeset(&new_crtc_state->base) &&
13384             old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13385                 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
13386 }
13387
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	/* Unregister from the DRM core first, then free the embedding
	 * intel_plane that @plane points into. */
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
13400
13401 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13402                                             u32 format, u64 modifier)
13403 {
13404         switch (modifier) {
13405         case DRM_FORMAT_MOD_LINEAR:
13406         case I915_FORMAT_MOD_X_TILED:
13407                 break;
13408         default:
13409                 return false;
13410         }
13411
13412         switch (format) {
13413         case DRM_FORMAT_C8:
13414         case DRM_FORMAT_RGB565:
13415         case DRM_FORMAT_XRGB1555:
13416         case DRM_FORMAT_XRGB8888:
13417                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13418                         modifier == I915_FORMAT_MOD_X_TILED;
13419         default:
13420                 return false;
13421         }
13422 }
13423
13424 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13425                                             u32 format, u64 modifier)
13426 {
13427         switch (modifier) {
13428         case DRM_FORMAT_MOD_LINEAR:
13429         case I915_FORMAT_MOD_X_TILED:
13430                 break;
13431         default:
13432                 return false;
13433         }
13434
13435         switch (format) {
13436         case DRM_FORMAT_C8:
13437         case DRM_FORMAT_RGB565:
13438         case DRM_FORMAT_XRGB8888:
13439         case DRM_FORMAT_XBGR8888:
13440         case DRM_FORMAT_XRGB2101010:
13441         case DRM_FORMAT_XBGR2101010:
13442                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13443                         modifier == I915_FORMAT_MOD_X_TILED;
13444         default:
13445                 return false;
13446         }
13447 }
13448
/* .format_mod_supported for skl+ universal planes. */
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
					   u32 format, u64 modifier)
{
	struct intel_plane *plane = to_intel_plane(_plane);

	/* First filter on the modifier alone; the CCS variants additionally
	 * require the plane to support render compression. */
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (!plane->has_ccs)
			return false;
		break;
	default:
		return false;
	}

	/* Then narrow per format. The fallthroughs are deliberate: each
	 * group accepts its own modifiers plus everything the groups below
	 * it accept (8888 -> CCS + Yf + linear/X/Y; 565/10bpc/YUV ->
	 * Yf + linear/X/Y; C8 -> linear/X/Y only). */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		if (is_ccs_modifier(modifier))
			return true;
		/* fall through */
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
		if (modifier == I915_FORMAT_MOD_Yf_TILED)
			return true;
		/* fall through */
	case DRM_FORMAT_C8:
		if (modifier == DRM_FORMAT_MOD_LINEAR ||
		    modifier == I915_FORMAT_MOD_X_TILED ||
		    modifier == I915_FORMAT_MOD_Y_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}
13498
13499 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13500                                               u32 format, u64 modifier)
13501 {
13502         return modifier == DRM_FORMAT_MOD_LINEAR &&
13503                 format == DRM_FORMAT_ARGB8888;
13504 }
13505
13506 static struct drm_plane_funcs skl_plane_funcs = {
13507         .update_plane = drm_atomic_helper_update_plane,
13508         .disable_plane = drm_atomic_helper_disable_plane,
13509         .destroy = intel_plane_destroy,
13510         .atomic_get_property = intel_plane_atomic_get_property,
13511         .atomic_set_property = intel_plane_atomic_set_property,
13512         .atomic_duplicate_state = intel_plane_duplicate_state,
13513         .atomic_destroy_state = intel_plane_destroy_state,
13514         .format_mod_supported = skl_plane_format_mod_supported,
13515 };
13516
13517 static struct drm_plane_funcs i965_plane_funcs = {
13518         .update_plane = drm_atomic_helper_update_plane,
13519         .disable_plane = drm_atomic_helper_disable_plane,
13520         .destroy = intel_plane_destroy,
13521         .atomic_get_property = intel_plane_atomic_get_property,
13522         .atomic_set_property = intel_plane_atomic_set_property,
13523         .atomic_duplicate_state = intel_plane_duplicate_state,
13524         .atomic_destroy_state = intel_plane_destroy_state,
13525         .format_mod_supported = i965_plane_format_mod_supported,
13526 };
13527
13528 static struct drm_plane_funcs i8xx_plane_funcs = {
13529         .update_plane = drm_atomic_helper_update_plane,
13530         .disable_plane = drm_atomic_helper_disable_plane,
13531         .destroy = intel_plane_destroy,
13532         .atomic_get_property = intel_plane_atomic_get_property,
13533         .atomic_set_property = intel_plane_atomic_set_property,
13534         .atomic_duplicate_state = intel_plane_duplicate_state,
13535         .atomic_destroy_state = intel_plane_destroy_state,
13536         .format_mod_supported = i8xx_plane_format_mod_supported,
13537 };
13538
13539 static int
13540 intel_legacy_cursor_update(struct drm_plane *plane,
13541                            struct drm_crtc *crtc,
13542                            struct drm_framebuffer *fb,
13543                            int crtc_x, int crtc_y,
13544                            unsigned int crtc_w, unsigned int crtc_h,
13545                            uint32_t src_x, uint32_t src_y,
13546                            uint32_t src_w, uint32_t src_h,
13547                            struct drm_modeset_acquire_ctx *ctx)
13548 {
13549         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
13550         int ret;
13551         struct drm_plane_state *old_plane_state, *new_plane_state;
13552         struct intel_plane *intel_plane = to_intel_plane(plane);
13553         struct drm_framebuffer *old_fb;
13554         struct intel_crtc_state *crtc_state =
13555                 to_intel_crtc_state(crtc->state);
13556         struct intel_crtc_state *new_crtc_state;
13557
13558         /*
13559          * When crtc is inactive or there is a modeset pending,
13560          * wait for it to complete in the slowpath
13561          */
13562         if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
13563             crtc_state->update_pipe)
13564                 goto slow;
13565
13566         old_plane_state = plane->state;
13567         /*
13568          * Don't do an async update if there is an outstanding commit modifying
13569          * the plane.  This prevents our async update's changes from getting
13570          * overridden by a previous synchronous update's state.
13571          */
13572         if (old_plane_state->commit &&
13573             !try_wait_for_completion(&old_plane_state->commit->hw_done))
13574                 goto slow;
13575
13576         /*
13577          * If any parameters change that may affect watermarks,
13578          * take the slowpath. Only changing fb or position should be
13579          * in the fastpath.
13580          */
13581         if (old_plane_state->crtc != crtc ||
13582             old_plane_state->src_w != src_w ||
13583             old_plane_state->src_h != src_h ||
13584             old_plane_state->crtc_w != crtc_w ||
13585             old_plane_state->crtc_h != crtc_h ||
13586             !old_plane_state->fb != !fb)
13587                 goto slow;
13588
13589         new_plane_state = intel_plane_duplicate_state(plane);
13590         if (!new_plane_state)
13591                 return -ENOMEM;
13592
13593         new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
13594         if (!new_crtc_state) {
13595                 ret = -ENOMEM;
13596                 goto out_free;
13597         }
13598
13599         drm_atomic_set_fb_for_plane(new_plane_state, fb);
13600
13601         new_plane_state->src_x = src_x;
13602         new_plane_state->src_y = src_y;
13603         new_plane_state->src_w = src_w;
13604         new_plane_state->src_h = src_h;
13605         new_plane_state->crtc_x = crtc_x;
13606         new_plane_state->crtc_y = crtc_y;
13607         new_plane_state->crtc_w = crtc_w;
13608         new_plane_state->crtc_h = crtc_h;
13609
13610         ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
13611                                                   to_intel_plane_state(old_plane_state),
13612                                                   to_intel_plane_state(new_plane_state));
13613         if (ret)
13614                 goto out_free;
13615
13616         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13617         if (ret)
13618                 goto out_free;
13619
13620         ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
13621         if (ret)
13622                 goto out_unlock;
13623
13624         intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
13625
13626         old_fb = old_plane_state->fb;
13627         i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
13628                           intel_plane->frontbuffer_bit);
13629
13630         /* Swap plane state */
13631         plane->state = new_plane_state;
13632
13633         /*
13634          * We cannot swap crtc_state as it may be in use by an atomic commit or
13635          * page flip that's running simultaneously. If we swap crtc_state and
13636          * destroy the old state, we will cause a use-after-free there.
13637          *
13638          * Only update active_planes, which is needed for our internal
13639          * bookkeeping. Either value will do the right thing when updating
13640          * planes atomically. If the cursor was part of the atomic update then
13641          * we would have taken the slowpath.
13642          */
13643         crtc_state->active_planes = new_crtc_state->active_planes;
13644
13645         if (plane->state->visible) {
13646                 trace_intel_update_plane(plane, to_intel_crtc(crtc));
13647                 intel_plane->update_plane(intel_plane, crtc_state,
13648                                           to_intel_plane_state(plane->state));
13649         } else {
13650                 trace_intel_disable_plane(plane, to_intel_crtc(crtc));
13651                 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
13652         }
13653
13654         intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
13655
13656 out_unlock:
13657         mutex_unlock(&dev_priv->drm.struct_mutex);
13658 out_free:
13659         if (new_crtc_state)
13660                 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
13661         if (ret)
13662                 intel_plane_destroy_state(plane, new_plane_state);
13663         else
13664                 intel_plane_destroy_state(plane, old_plane_state);
13665         return ret;
13666
13667 slow:
13668         return drm_atomic_helper_update_plane(plane, crtc, fb,
13669                                               crtc_x, crtc_y, crtc_w, crtc_h,
13670                                               src_x, src_y, src_w, src_h, ctx);
13671 }
13672
/*
 * Plane funcs for the cursor plane. Uses the legacy-cursor fastpath
 * (intel_legacy_cursor_update) for .update_plane instead of the generic
 * atomic helper, so cursor moves can bypass a full atomic commit.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13683
13684 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13685                                enum i9xx_plane_id i9xx_plane)
13686 {
13687         if (!HAS_FBC(dev_priv))
13688                 return false;
13689
13690         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13691                 return i9xx_plane == PLANE_A; /* tied to pipe A */
13692         else if (IS_IVYBRIDGE(dev_priv))
13693                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13694                         i9xx_plane == PLANE_C;
13695         else if (INTEL_GEN(dev_priv) >= 4)
13696                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13697         else
13698                 return i9xx_plane == PLANE_A;
13699 }
13700
13701 static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13702                               enum pipe pipe, enum plane_id plane_id)
13703 {
13704         if (!HAS_FBC(dev_priv))
13705                 return false;
13706
13707         return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13708 }
13709
13710 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13711                           enum pipe pipe, enum plane_id plane_id)
13712 {
13713         /*
13714          * FIXME: ICL requires two hardware planes for scanning out NV12
13715          * framebuffers. Do not advertize support until this is implemented.
13716          */
13717         if (INTEL_GEN(dev_priv) >= 11)
13718                 return false;
13719
13720         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13721                 return false;
13722
13723         if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13724                 return false;
13725
13726         if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13727                 return false;
13728
13729         return true;
13730 }
13731
/*
 * Allocate and register the primary plane for @pipe.
 *
 * Selects the supported formats, modifiers, plane vfuncs and KMS
 * properties based on the hardware generation, then registers the
 * plane with the DRM core. Returns the new plane or an ERR_PTR on
 * failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const struct drm_plane_funcs *plane_funcs;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	/* -1 == no scaler assigned; only gen9+ have plane scalers. */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	/* Advertise this plane to FBC as a possible framebuffer source. */
	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	/* Per-generation format lists, modifiers and plane vfuncs. */
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
						     PLANE_PRIMARY);

		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
			intel_primary_formats = skl_pri_planar_formats;
			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
		} else {
			intel_primary_formats = skl_primary_formats;
			num_formats = ARRAY_SIZE(skl_primary_formats);
		}

		if (primary->has_ccs)
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->max_stride = skl_plane_max_stride;
		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;
		primary->check_plane = skl_plane_check;

		plane_funcs = &skl_plane_funcs;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->max_stride = i9xx_plane_max_stride;
		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
		primary->check_plane = i9xx_plane_check;

		plane_funcs = &i965_plane_funcs;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->max_stride = i9xx_plane_max_stride;
		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
		primary->check_plane = i9xx_plane_check;

		plane_funcs = &i8xx_plane_funcs;
	}

	/* The debug name convention for the plane differs per generation. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	/* Supported rotations/reflections depend on the generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* gen9+ additionally expose color, alpha and blend properties. */
	if (INTEL_GEN(dev_priv) >= 9) {
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

		drm_plane_create_alpha_property(&primary->base);
		drm_plane_create_blend_mode_property(&primary->base,
						     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
						     BIT(DRM_MODE_BLEND_PREMULTI) |
						     BIT(DRM_MODE_BLEND_COVERAGE));
	}

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	/* kfree(NULL) is a no-op, so this is safe for partial setup. */
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13912
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * i845/i865 have their own cursor register layout; everything else
 * uses the i9xx-style cursor. Returns the new plane or an ERR_PTR on
 * failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                          enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* Pick the cursor vfuncs for the hardware's register layout. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 == "unknown", forcing a full write on the first update. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* -1 == no scaler assigned; only gen9+ have plane scalers. */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	/* kfree(NULL) is a no-op, so this is safe for partial setup. */
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13989
13990 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13991                                     struct intel_crtc_state *crtc_state)
13992 {
13993         struct intel_crtc_scaler_state *scaler_state =
13994                 &crtc_state->scaler_state;
13995         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13996         int i;
13997
13998         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13999         if (!crtc->num_scalers)
14000                 return;
14001
14002         for (i = 0; i < crtc->num_scalers; i++) {
14003                 struct intel_scaler *scaler = &scaler_state->scalers[i];
14004
14005                 scaler->in_use = 0;
14006                 scaler->mode = 0;
14007         }
14008
14009         scaler_state->scaler_id = -1;
14010 }
14011
/*
 * Create and register the CRTC for @pipe along with its primary,
 * sprite and cursor planes, and set up the pipe/plane -> crtc
 * mappings. Returns 0 on success or a negative error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	/* Create all sprite/overlay planes this pipe has. */
	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-gen9 also needs the legacy plane -> crtc mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14100
14101 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14102 {
14103         struct drm_device *dev = connector->base.dev;
14104
14105         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14106
14107         if (!connector->base.state->crtc)
14108                 return INVALID_PIPE;
14109
14110         return to_intel_crtc(connector->base.state->crtc)->pipe;
14111 }
14112
14113 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14114                                       struct drm_file *file)
14115 {
14116         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14117         struct drm_crtc *drmmode_crtc;
14118         struct intel_crtc *crtc;
14119
14120         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14121         if (!drmmode_crtc)
14122                 return -ENOENT;
14123
14124         crtc = to_intel_crtc(drmmode_crtc);
14125         pipe_from_crtc_id->pipe = crtc->pipe;
14126
14127         return 0;
14128 }
14129
14130 static int intel_encoder_clones(struct intel_encoder *encoder)
14131 {
14132         struct drm_device *dev = encoder->base.dev;
14133         struct intel_encoder *source_encoder;
14134         int index_mask = 0;
14135         int entry = 0;
14136
14137         for_each_intel_encoder(dev, source_encoder) {
14138                 if (encoders_cloneable(encoder, source_encoder))
14139                         index_mask |= (1 << entry);
14140
14141                 entry++;
14142         }
14143
14144         return index_mask;
14145 }
14146
14147 static bool has_edp_a(struct drm_i915_private *dev_priv)
14148 {
14149         if (!IS_MOBILE(dev_priv))
14150                 return false;
14151
14152         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14153                 return false;
14154
14155         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14156                 return false;
14157
14158         return true;
14159 }
14160
14161 static bool intel_crt_present(struct drm_i915_private *dev_priv)
14162 {
14163         if (INTEL_GEN(dev_priv) >= 9)
14164                 return false;
14165
14166         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14167                 return false;
14168
14169         if (IS_CHERRYVIEW(dev_priv))
14170                 return false;
14171
14172         if (HAS_PCH_LPT_H(dev_priv) &&
14173             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14174                 return false;
14175
14176         /* DDI E can't be used if DDI A requires 4 lanes */
14177         if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14178                 return false;
14179
14180         if (!dev_priv->vbt.int_crt_support)
14181                 return false;
14182
14183         return true;
14184 }
14185
14186 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14187 {
14188         int pps_num;
14189         int pps_idx;
14190
14191         if (HAS_DDI(dev_priv))
14192                 return;
14193         /*
14194          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14195          * everywhere where registers can be write protected.
14196          */
14197         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14198                 pps_num = 2;
14199         else
14200                 pps_num = 1;
14201
14202         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14203                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14204
14205                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14206                 I915_WRITE(PP_CONTROL(pps_idx), val);
14207         }
14208 }
14209
/*
 * Record the platform's panel-power-sequencer register base and
 * unlock the PPS registers before first use.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
14221
14222 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14223 {
14224         struct intel_encoder *encoder;
14225         bool dpd_is_edp = false;
14226
14227         intel_pps_init(dev_priv);
14228
14229         if (INTEL_INFO(dev_priv)->num_pipes == 0)
14230                 return;
14231
14232         /*
14233          * intel_edp_init_connector() depends on this completing first, to
14234          * prevent the registeration of both eDP and LVDS and the incorrect
14235          * sharing of the PPS.
14236          */
14237         intel_lvds_init(dev_priv);
14238
14239         if (intel_crt_present(dev_priv))
14240                 intel_crt_init(dev_priv);
14241
14242         if (IS_ICELAKE(dev_priv)) {
14243                 intel_ddi_init(dev_priv, PORT_A);
14244                 intel_ddi_init(dev_priv, PORT_B);
14245                 intel_ddi_init(dev_priv, PORT_C);
14246                 intel_ddi_init(dev_priv, PORT_D);
14247                 intel_ddi_init(dev_priv, PORT_E);
14248                 intel_ddi_init(dev_priv, PORT_F);
14249         } else if (IS_GEN9_LP(dev_priv)) {
14250                 /*
14251                  * FIXME: Broxton doesn't support port detection via the
14252                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14253                  * detect the ports.
14254                  */
14255                 intel_ddi_init(dev_priv, PORT_A);
14256                 intel_ddi_init(dev_priv, PORT_B);
14257                 intel_ddi_init(dev_priv, PORT_C);
14258
14259                 vlv_dsi_init(dev_priv);
14260         } else if (HAS_DDI(dev_priv)) {
14261                 int found;
14262
14263                 /*
14264                  * Haswell uses DDI functions to detect digital outputs.
14265                  * On SKL pre-D0 the strap isn't connected, so we assume
14266                  * it's there.
14267                  */
14268                 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14269                 /* WaIgnoreDDIAStrap: skl */
14270                 if (found || IS_GEN9_BC(dev_priv))
14271                         intel_ddi_init(dev_priv, PORT_A);
14272
14273                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14274                  * register */
14275                 found = I915_READ(SFUSE_STRAP);
14276
14277                 if (found & SFUSE_STRAP_DDIB_DETECTED)
14278                         intel_ddi_init(dev_priv, PORT_B);
14279                 if (found & SFUSE_STRAP_DDIC_DETECTED)
14280                         intel_ddi_init(dev_priv, PORT_C);
14281                 if (found & SFUSE_STRAP_DDID_DETECTED)
14282                         intel_ddi_init(dev_priv, PORT_D);
14283                 if (found & SFUSE_STRAP_DDIF_DETECTED)
14284                         intel_ddi_init(dev_priv, PORT_F);
14285                 /*
14286                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14287                  */
14288                 if (IS_GEN9_BC(dev_priv) &&
14289                     (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14290                      dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14291                      dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14292                         intel_ddi_init(dev_priv, PORT_E);
14293
14294         } else if (HAS_PCH_SPLIT(dev_priv)) {
14295                 int found;
14296                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
14297
14298                 if (has_edp_a(dev_priv))
14299                         intel_dp_init(dev_priv, DP_A, PORT_A);
14300
14301                 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14302                         /* PCH SDVOB multiplex with HDMIB */
14303                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14304                         if (!found)
14305                                 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14306                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14307                                 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14308                 }
14309
14310                 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14311                         intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14312
14313                 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14314                         intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14315
14316                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14317                         intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14318
14319                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14320                         intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14321         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14322                 bool has_edp, has_port;
14323
14324                 /*
14325                  * The DP_DETECTED bit is the latched state of the DDC
14326                  * SDA pin at boot. However since eDP doesn't require DDC
14327                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
14328                  * eDP ports may have been muxed to an alternate function.
14329                  * Thus we can't rely on the DP_DETECTED bit alone to detect
14330                  * eDP ports. Consult the VBT as well as DP_DETECTED to
14331                  * detect eDP ports.
14332                  *
14333                  * Sadly the straps seem to be missing sometimes even for HDMI
14334                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14335                  * and VBT for the presence of the port. Additionally we can't
14336                  * trust the port type the VBT declares as we've seen at least
14337                  * HDMI ports that the VBT claim are DP or eDP.
14338                  */
14339                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
14340                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14341                 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14342                         has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
14343                 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14344                         intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
14345
14346                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
14347                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14348                 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14349                         has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
14350                 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14351                         intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
14352
14353                 if (IS_CHERRYVIEW(dev_priv)) {
14354                         /*
14355                          * eDP not supported on port D,
14356                          * so no need to worry about it
14357                          */
14358                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14359                         if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14360                                 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
14361                         if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14362                                 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
14363                 }
14364
14365                 vlv_dsi_init(dev_priv);
14366         } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
14367                 bool found = false;
14368
14369                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14370                         DRM_DEBUG_KMS("probing SDVOB\n");
14371                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
14372                         if (!found && IS_G4X(dev_priv)) {
14373                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14374                                 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
14375                         }
14376
14377                         if (!found && IS_G4X(dev_priv))
14378                                 intel_dp_init(dev_priv, DP_B, PORT_B);
14379                 }
14380
14381                 /* Before G4X SDVOC doesn't have its own detect register */
14382
14383                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14384                         DRM_DEBUG_KMS("probing SDVOC\n");
14385                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
14386                 }
14387
14388                 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14389
14390                         if (IS_G4X(dev_priv)) {
14391                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14392                                 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
14393                         }
14394                         if (IS_G4X(dev_priv))
14395                                 intel_dp_init(dev_priv, DP_C, PORT_C);
14396                 }
14397
14398                 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
14399                         intel_dp_init(dev_priv, DP_D, PORT_D);
14400         } else if (IS_GEN2(dev_priv))
14401                 intel_dvo_init(dev_priv);
14402
14403         if (SUPPORTS_TV(dev_priv))
14404                 intel_tv_init(dev_priv);
14405
14406         intel_psr_init(dev_priv);
14407
14408         for_each_intel_encoder(&dev_priv->drm, encoder) {
14409                 encoder->base.possible_crtcs = encoder->crtc_mask;
14410                 encoder->base.possible_clones =
14411                         intel_encoder_clones(encoder);
14412         }
14413
14414         intel_init_pch_refclk(dev_priv);
14415
14416         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
14417 }
14418
14419 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14420 {
14421         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14422         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14423
14424         drm_framebuffer_cleanup(fb);
14425
14426         i915_gem_object_lock(obj);
14427         WARN_ON(!obj->framebuffer_references--);
14428         i915_gem_object_unlock(obj);
14429
14430         i915_gem_object_put(obj);
14431
14432         kfree(intel_fb);
14433 }
14434
14435 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14436                                                 struct drm_file *file,
14437                                                 unsigned int *handle)
14438 {
14439         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14440
14441         if (obj->userptr.mm) {
14442                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14443                 return -EINVAL;
14444         }
14445
14446         return drm_gem_handle_create(file, &obj->base, handle);
14447 }
14448
14449 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14450                                         struct drm_file *file,
14451                                         unsigned flags, unsigned color,
14452                                         struct drm_clip_rect *clips,
14453                                         unsigned num_clips)
14454 {
14455         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14456
14457         i915_gem_object_flush_if_display(obj);
14458         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14459
14460         return 0;
14461 }
14462
/*
 * Framebuffer vfuncs installed by intel_framebuffer_init() for all
 * userspace-created framebuffers.
 */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14468
14469 static
14470 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14471                          uint64_t fb_modifier, uint32_t pixel_format)
14472 {
14473         struct intel_crtc *crtc;
14474         struct intel_plane *plane;
14475
14476         /*
14477          * We assume the primary plane for pipe A has
14478          * the highest stride limits of them all.
14479          */
14480         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14481         plane = to_intel_plane(crtc->base.primary);
14482
14483         return plane->max_stride(plane, pixel_format, fb_modifier,
14484                                  DRM_MODE_ROTATE_0);
14485 }
14486
14487 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14488                                   struct drm_i915_gem_object *obj,
14489                                   struct drm_mode_fb_cmd2 *mode_cmd)
14490 {
14491         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14492         struct drm_framebuffer *fb = &intel_fb->base;
14493         struct drm_format_name_buf format_name;
14494         u32 pitch_limit;
14495         unsigned int tiling, stride;
14496         int ret = -EINVAL;
14497         int i;
14498
14499         i915_gem_object_lock(obj);
14500         obj->framebuffer_references++;
14501         tiling = i915_gem_object_get_tiling(obj);
14502         stride = i915_gem_object_get_stride(obj);
14503         i915_gem_object_unlock(obj);
14504
14505         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14506                 /*
14507                  * If there's a fence, enforce that
14508                  * the fb modifier and tiling mode match.
14509                  */
14510                 if (tiling != I915_TILING_NONE &&
14511                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14512                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14513                         goto err;
14514                 }
14515         } else {
14516                 if (tiling == I915_TILING_X) {
14517                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14518                 } else if (tiling == I915_TILING_Y) {
14519                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14520                         goto err;
14521                 }
14522         }
14523
14524         /* Passed in modifier sanity checking. */
14525         switch (mode_cmd->modifier[0]) {
14526         case I915_FORMAT_MOD_Y_TILED_CCS:
14527         case I915_FORMAT_MOD_Yf_TILED_CCS:
14528                 switch (mode_cmd->pixel_format) {
14529                 case DRM_FORMAT_XBGR8888:
14530                 case DRM_FORMAT_ABGR8888:
14531                 case DRM_FORMAT_XRGB8888:
14532                 case DRM_FORMAT_ARGB8888:
14533                         break;
14534                 default:
14535                         DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14536                         goto err;
14537                 }
14538                 /* fall through */
14539         case I915_FORMAT_MOD_Y_TILED:
14540         case I915_FORMAT_MOD_Yf_TILED:
14541                 if (INTEL_GEN(dev_priv) < 9) {
14542                         DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14543                                       mode_cmd->modifier[0]);
14544                         goto err;
14545                 }
14546         case DRM_FORMAT_MOD_LINEAR:
14547         case I915_FORMAT_MOD_X_TILED:
14548                 break;
14549         default:
14550                 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14551                               mode_cmd->modifier[0]);
14552                 goto err;
14553         }
14554
14555         /*
14556          * gen2/3 display engine uses the fence if present,
14557          * so the tiling mode must match the fb modifier exactly.
14558          */
14559         if (INTEL_GEN(dev_priv) < 4 &&
14560             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14561                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14562                 goto err;
14563         }
14564
14565         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
14566                                            mode_cmd->pixel_format);
14567         if (mode_cmd->pitches[0] > pitch_limit) {
14568                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14569                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14570                               "tiled" : "linear",
14571                               mode_cmd->pitches[0], pitch_limit);
14572                 goto err;
14573         }
14574
14575         /*
14576          * If there's a fence, enforce that
14577          * the fb pitch and fence stride match.
14578          */
14579         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14580                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14581                               mode_cmd->pitches[0], stride);
14582                 goto err;
14583         }
14584
14585         /* Reject formats not supported by any plane early. */
14586         switch (mode_cmd->pixel_format) {
14587         case DRM_FORMAT_C8:
14588         case DRM_FORMAT_RGB565:
14589         case DRM_FORMAT_XRGB8888:
14590         case DRM_FORMAT_ARGB8888:
14591                 break;
14592         case DRM_FORMAT_XRGB1555:
14593                 if (INTEL_GEN(dev_priv) > 3) {
14594                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14595                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14596                         goto err;
14597                 }
14598                 break;
14599         case DRM_FORMAT_ABGR8888:
14600                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
14601                     INTEL_GEN(dev_priv) < 9) {
14602                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14603                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14604                         goto err;
14605                 }
14606                 break;
14607         case DRM_FORMAT_XBGR8888:
14608         case DRM_FORMAT_XRGB2101010:
14609         case DRM_FORMAT_XBGR2101010:
14610                 if (INTEL_GEN(dev_priv) < 4) {
14611                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14612                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14613                         goto err;
14614                 }
14615                 break;
14616         case DRM_FORMAT_ABGR2101010:
14617                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
14618                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14619                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14620                         goto err;
14621                 }
14622                 break;
14623         case DRM_FORMAT_YUYV:
14624         case DRM_FORMAT_UYVY:
14625         case DRM_FORMAT_YVYU:
14626         case DRM_FORMAT_VYUY:
14627                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
14628                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14629                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14630                         goto err;
14631                 }
14632                 break;
14633         case DRM_FORMAT_NV12:
14634                 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14635                     IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
14636                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14637                                       drm_get_format_name(mode_cmd->pixel_format,
14638                                                           &format_name));
14639                         goto err;
14640                 }
14641                 break;
14642         default:
14643                 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14644                               drm_get_format_name(mode_cmd->pixel_format, &format_name));
14645                 goto err;
14646         }
14647
14648         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14649         if (mode_cmd->offsets[0] != 0)
14650                 goto err;
14651
14652         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14653
14654         if (fb->format->format == DRM_FORMAT_NV12 &&
14655             (fb->width < SKL_MIN_YUV_420_SRC_W ||
14656              fb->height < SKL_MIN_YUV_420_SRC_H ||
14657              (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14658                 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14659                 return -EINVAL;
14660         }
14661
14662         for (i = 0; i < fb->format->num_planes; i++) {
14663                 u32 stride_alignment;
14664
14665                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14666                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
14667                         goto err;
14668                 }
14669
14670                 stride_alignment = intel_fb_stride_alignment(fb, i);
14671
14672                 /*
14673                  * Display WA #0531: skl,bxt,kbl,glk
14674                  *
14675                  * Render decompression and plane width > 3840
14676                  * combined with horizontal panning requires the
14677                  * plane stride to be a multiple of 4. We'll just
14678                  * require the entire fb to accommodate that to avoid
14679                  * potential runtime errors at plane configuration time.
14680                  */
14681                 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14682                     is_ccs_modifier(fb->modifier))
14683                         stride_alignment *= 4;
14684
14685                 if (fb->pitches[i] & (stride_alignment - 1)) {
14686                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14687                                       i, fb->pitches[i], stride_alignment);
14688                         goto err;
14689                 }
14690
14691                 fb->obj[i] = &obj->base;
14692         }
14693
14694         ret = intel_fill_fb_info(dev_priv, fb);
14695         if (ret)
14696                 goto err;
14697
14698         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14699         if (ret) {
14700                 DRM_ERROR("framebuffer init failed %d\n", ret);
14701                 goto err;
14702         }
14703
14704         return 0;
14705
14706 err:
14707         i915_gem_object_lock(obj);
14708         obj->framebuffer_references--;
14709         i915_gem_object_unlock(obj);
14710         return ret;
14711 }
14712
14713 static struct drm_framebuffer *
14714 intel_user_framebuffer_create(struct drm_device *dev,
14715                               struct drm_file *filp,
14716                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14717 {
14718         struct drm_framebuffer *fb;
14719         struct drm_i915_gem_object *obj;
14720         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14721
14722         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14723         if (!obj)
14724                 return ERR_PTR(-ENOENT);
14725
14726         fb = intel_framebuffer_create(obj, &mode_cmd);
14727         if (IS_ERR(fb))
14728                 i915_gem_object_put(obj);
14729
14730         return fb;
14731 }
14732
14733 static void intel_atomic_state_free(struct drm_atomic_state *state)
14734 {
14735         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14736
14737         drm_atomic_state_default_release(state);
14738
14739         i915_sw_fence_fini(&intel_state->commit_ready);
14740
14741         kfree(state);
14742 }
14743
14744 static enum drm_mode_status
14745 intel_mode_valid(struct drm_device *dev,
14746                  const struct drm_display_mode *mode)
14747 {
14748         struct drm_i915_private *dev_priv = to_i915(dev);
14749         int hdisplay_max, htotal_max;
14750         int vdisplay_max, vtotal_max;
14751
14752         /*
14753          * Can't reject DBLSCAN here because Xorg ddxen can add piles
14754          * of DBLSCAN modes to the output's mode list when they detect
14755          * the scaling mode property on the connector. And they don't
14756          * ask the kernel to validate those modes in any way until
14757          * modeset time at which point the client gets a protocol error.
14758          * So in order to not upset those clients we silently ignore the
14759          * DBLSCAN flag on such connectors. For other connectors we will
14760          * reject modes with the DBLSCAN flag in encoder->compute_config().
14761          * And we always reject DBLSCAN modes in connector->mode_valid()
14762          * as we never want such modes on the connector's mode list.
14763          */
14764
14765         if (mode->vscan > 1)
14766                 return MODE_NO_VSCAN;
14767
14768         if (mode->flags & DRM_MODE_FLAG_HSKEW)
14769                 return MODE_H_ILLEGAL;
14770
14771         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14772                            DRM_MODE_FLAG_NCSYNC |
14773                            DRM_MODE_FLAG_PCSYNC))
14774                 return MODE_HSYNC;
14775
14776         if (mode->flags & (DRM_MODE_FLAG_BCAST |
14777                            DRM_MODE_FLAG_PIXMUX |
14778                            DRM_MODE_FLAG_CLKDIV2))
14779                 return MODE_BAD;
14780
14781         if (INTEL_GEN(dev_priv) >= 9 ||
14782             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14783                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14784                 vdisplay_max = 4096;
14785                 htotal_max = 8192;
14786                 vtotal_max = 8192;
14787         } else if (INTEL_GEN(dev_priv) >= 3) {
14788                 hdisplay_max = 4096;
14789                 vdisplay_max = 4096;
14790                 htotal_max = 8192;
14791                 vtotal_max = 8192;
14792         } else {
14793                 hdisplay_max = 2048;
14794                 vdisplay_max = 2048;
14795                 htotal_max = 4096;
14796                 vtotal_max = 4096;
14797         }
14798
14799         if (mode->hdisplay > hdisplay_max ||
14800             mode->hsync_start > htotal_max ||
14801             mode->hsync_end > htotal_max ||
14802             mode->htotal > htotal_max)
14803                 return MODE_H_ILLEGAL;
14804
14805         if (mode->vdisplay > vdisplay_max ||
14806             mode->vsync_start > vtotal_max ||
14807             mode->vsync_end > vtotal_max ||
14808             mode->vtotal > vtotal_max)
14809                 return MODE_V_ILLEGAL;
14810
14811         return MODE_OK;
14812 }
14813
/* Device-wide mode config vfuncs (atomic modesetting + fb creation). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14825
14826 /**
14827  * intel_init_display_hooks - initialize the display modesetting hooks
14828  * @dev_priv: device private
14829  */
14830 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14831 {
14832         intel_init_cdclk_hooks(dev_priv);
14833
14834         if (INTEL_GEN(dev_priv) >= 9) {
14835                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14836                 dev_priv->display.get_initial_plane_config =
14837                         skylake_get_initial_plane_config;
14838                 dev_priv->display.crtc_compute_clock =
14839                         haswell_crtc_compute_clock;
14840                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14841                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14842         } else if (HAS_DDI(dev_priv)) {
14843                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14844                 dev_priv->display.get_initial_plane_config =
14845                         i9xx_get_initial_plane_config;
14846                 dev_priv->display.crtc_compute_clock =
14847                         haswell_crtc_compute_clock;
14848                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14849                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14850         } else if (HAS_PCH_SPLIT(dev_priv)) {
14851                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14852                 dev_priv->display.get_initial_plane_config =
14853                         i9xx_get_initial_plane_config;
14854                 dev_priv->display.crtc_compute_clock =
14855                         ironlake_crtc_compute_clock;
14856                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14857                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14858         } else if (IS_CHERRYVIEW(dev_priv)) {
14859                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14860                 dev_priv->display.get_initial_plane_config =
14861                         i9xx_get_initial_plane_config;
14862                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14863                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14864                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14865         } else if (IS_VALLEYVIEW(dev_priv)) {
14866                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14867                 dev_priv->display.get_initial_plane_config =
14868                         i9xx_get_initial_plane_config;
14869                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14870                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14871                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14872         } else if (IS_G4X(dev_priv)) {
14873                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14874                 dev_priv->display.get_initial_plane_config =
14875                         i9xx_get_initial_plane_config;
14876                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14877                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14878                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14879         } else if (IS_PINEVIEW(dev_priv)) {
14880                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14881                 dev_priv->display.get_initial_plane_config =
14882                         i9xx_get_initial_plane_config;
14883                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14884                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14885                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14886         } else if (!IS_GEN2(dev_priv)) {
14887                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14888                 dev_priv->display.get_initial_plane_config =
14889                         i9xx_get_initial_plane_config;
14890                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14891                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14892                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14893         } else {
14894                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14895                 dev_priv->display.get_initial_plane_config =
14896                         i9xx_get_initial_plane_config;
14897                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14898                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14899                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14900         }
14901
14902         if (IS_GEN5(dev_priv)) {
14903                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14904         } else if (IS_GEN6(dev_priv)) {
14905                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14906         } else if (IS_IVYBRIDGE(dev_priv)) {
14907                 /* FIXME: detect B0+ stepping and use auto training */
14908                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14909         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14910                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14911         }
14912
14913         if (INTEL_GEN(dev_priv) >= 9)
14914                 dev_priv->display.update_crtcs = skl_update_crtcs;
14915         else
14916                 dev_priv->display.update_crtcs = intel_update_crtcs;
14917 }
14918
14919 /*
14920  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14921  */
14922 static void quirk_ssc_force_disable(struct drm_device *dev)
14923 {
14924         struct drm_i915_private *dev_priv = to_i915(dev);
14925         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14926         DRM_INFO("applying lvds SSC disable quirk\n");
14927 }
14928
14929 /*
14930  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14931  * brightness value
14932  */
14933 static void quirk_invert_brightness(struct drm_device *dev)
14934 {
14935         struct drm_i915_private *dev_priv = to_i915(dev);
14936         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14937         DRM_INFO("applying inverted panel brightness quirk\n");
14938 }
14939
14940 /* Some VBT's incorrectly indicate no backlight is present */
14941 static void quirk_backlight_present(struct drm_device *dev)
14942 {
14943         struct drm_i915_private *dev_priv = to_i915(dev);
14944         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14945         DRM_INFO("applying backlight present quirk\n");
14946 }
14947
14948 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14949  * which is 300 ms greater than eDP spec T12 min.
14950  */
14951 static void quirk_increase_t12_delay(struct drm_device *dev)
14952 {
14953         struct drm_i915_private *dev_priv = to_i915(dev);
14954
14955         dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14956         DRM_INFO("Applying T12 delay quirk\n");
14957 }
14958
14959 /*
14960  * GeminiLake NUC HDMI outputs require additional off time
14961  * this allows the onboard retimer to correctly sync to signal
14962  */
14963 static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14964 {
14965         struct drm_i915_private *dev_priv = to_i915(dev);
14966
14967         dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14968         DRM_INFO("Applying Increase DDI Disabled quirk\n");
14969 }
14970
/* Display quirk entry keyed on the PCI device and subsystem IDs. */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID */
	int subsystem_device;	/* PCI subsystem device ID */
	void (*hook)(struct drm_device *dev);	/* quirk applied on match */
};
14977
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* quirk applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* terminated match list */
};
14983
/*
 * DMI match callback for inverted-backlight systems: just logs the hit.
 * Returning non-zero stops dmi_check_system() from scanning further
 * entries in the same list.
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14989
/* DMI-matched quirks for machines lacking usable PCI subsystem IDs. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* Empty product name: matches all NCR boxes. */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
15005
/* PCI-ID keyed quirk table, consulted at driver init. */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX */
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
15065
15066 static void intel_init_quirks(struct drm_device *dev)
15067 {
15068         struct pci_dev *d = dev->pdev;
15069         int i;
15070
15071         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15072                 struct intel_quirk *q = &intel_quirks[i];
15073
15074                 if (d->device == q->device &&
15075                     (d->subsystem_vendor == q->subsystem_vendor ||
15076                      q->subsystem_vendor == PCI_ANY_ID) &&
15077                     (d->subsystem_device == q->subsystem_device ||
15078                      q->subsystem_device == PCI_ANY_ID))
15079                         q->hook(dev);
15080         }
15081         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15082                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15083                         intel_dmi_quirks[i].hook(dev);
15084         }
15085 }
15086
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Set bit 5 of sequencer register SR01 via the legacy VGA I/O ports. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write before returning */
}
15105
15106 void intel_modeset_init_hw(struct drm_device *dev)
15107 {
15108         struct drm_i915_private *dev_priv = to_i915(dev);
15109
15110         intel_update_cdclk(dev_priv);
15111         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15112         dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15113 }
15114
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Another context holds a lock we need: back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Keep the committed software state in sync as well. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15201
15202 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15203 {
15204         if (IS_GEN5(dev_priv)) {
15205                 u32 fdi_pll_clk =
15206                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15207
15208                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15209         } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
15210                 dev_priv->fdi_pll_freq = 270000;
15211         } else {
15212                 return;
15213         }
15214
15215         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15216 }
15217
/*
 * Commit the state read out of the hardware for every active CRTC (and its
 * planes) once at driver load, so the atomic machinery recomputes derived
 * plane state before the first userspace modeset.  Returns 0 on success or
 * a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/* Pull every CRTC, and the planes of the active ones, into the state. */
	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Lock contention: drop the collected state and start over. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15265
15266 int intel_modeset_init(struct drm_device *dev)
15267 {
15268         struct drm_i915_private *dev_priv = to_i915(dev);
15269         struct i915_ggtt *ggtt = &dev_priv->ggtt;
15270         enum pipe pipe;
15271         struct intel_crtc *crtc;
15272         int ret;
15273
15274         dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15275
15276         drm_mode_config_init(dev);
15277
15278         dev->mode_config.min_width = 0;
15279         dev->mode_config.min_height = 0;
15280
15281         dev->mode_config.preferred_depth = 24;
15282         dev->mode_config.prefer_shadow = 1;
15283
15284         dev->mode_config.allow_fb_modifiers = true;
15285
15286         dev->mode_config.funcs = &intel_mode_funcs;
15287
15288         init_llist_head(&dev_priv->atomic_helper.free_list);
15289         INIT_WORK(&dev_priv->atomic_helper.free_work,
15290                   intel_atomic_helper_free_state_worker);
15291
15292         intel_init_quirks(dev);
15293
15294         intel_init_pm(dev_priv);
15295
15296         /*
15297          * There may be no VBT; and if the BIOS enabled SSC we can
15298          * just keep using it to avoid unnecessary flicker.  Whereas if the
15299          * BIOS isn't using it, don't assume it will work even if the VBT
15300          * indicates as much.
15301          */
15302         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15303                 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15304                                             DREF_SSC1_ENABLE);
15305
15306                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15307                         DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15308                                      bios_lvds_use_ssc ? "en" : "dis",
15309                                      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15310                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15311                 }
15312         }
15313
15314         /* maximum framebuffer dimensions */
15315         if (IS_GEN2(dev_priv)) {
15316                 dev->mode_config.max_width = 2048;
15317                 dev->mode_config.max_height = 2048;
15318         } else if (IS_GEN3(dev_priv)) {
15319                 dev->mode_config.max_width = 4096;
15320                 dev->mode_config.max_height = 4096;
15321         } else {
15322                 dev->mode_config.max_width = 8192;
15323                 dev->mode_config.max_height = 8192;
15324         }
15325
15326         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15327                 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15328                 dev->mode_config.cursor_height = 1023;
15329         } else if (IS_GEN2(dev_priv)) {
15330                 dev->mode_config.cursor_width = 64;
15331                 dev->mode_config.cursor_height = 64;
15332         } else {
15333                 dev->mode_config.cursor_width = 256;
15334                 dev->mode_config.cursor_height = 256;
15335         }
15336
15337         dev->mode_config.fb_base = ggtt->gmadr.start;
15338
15339         DRM_DEBUG_KMS("%d display pipe%s available.\n",
15340                       INTEL_INFO(dev_priv)->num_pipes,
15341                       INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15342
15343         for_each_pipe(dev_priv, pipe) {
15344                 ret = intel_crtc_init(dev_priv, pipe);
15345                 if (ret) {
15346                         drm_mode_config_cleanup(dev);
15347                         return ret;
15348                 }
15349         }
15350
15351         intel_shared_dpll_init(dev);
15352         intel_update_fdi_pll_freq(dev_priv);
15353
15354         intel_update_czclk(dev_priv);
15355         intel_modeset_init_hw(dev);
15356
15357         if (dev_priv->max_cdclk_freq == 0)
15358                 intel_update_max_cdclk(dev_priv);
15359
15360         /* Just disable it once at startup */
15361         i915_disable_vga(dev_priv);
15362         intel_setup_outputs(dev_priv);
15363
15364         drm_modeset_lock_all(dev);
15365         intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15366         drm_modeset_unlock_all(dev);
15367
15368         for_each_intel_crtc(dev, crtc) {
15369                 struct intel_initial_plane_config plane_config = {};
15370
15371                 if (!crtc->active)
15372                         continue;
15373
15374                 /*
15375                  * Note that reserving the BIOS fb up front prevents us
15376                  * from stuffing other stolen allocations like the ring
15377                  * on top.  This prevents some ugliness at boot time, and
15378                  * can even allow for smooth boot transitions if the BIOS
15379                  * fb is large enough for the active pipe configuration.
15380                  */
15381                 dev_priv->display.get_initial_plane_config(crtc,
15382                                                            &plane_config);
15383
15384                 /*
15385                  * If the fb is shared between multiple heads, we'll
15386                  * just get the first one.
15387                  */
15388                 intel_find_initial_plane_obj(crtc, &plane_config);
15389         }
15390
15391         /*
15392          * Make sure hardware watermarks really match the state we read out.
15393          * Note that we need to do this after reconstructing the BIOS fb's
15394          * since the watermark calculation done here will use pstate->fb.
15395          */
15396         if (!HAS_GMCH_DISPLAY(dev_priv))
15397                 sanitize_watermarks(dev);
15398
15399         /*
15400          * Force all active planes to recompute their states. So that on
15401          * mode_setcrtc after probe, all the intel_plane_state variables
15402          * are already calculated and there is no assert_plane warnings
15403          * during bootup.
15404          */
15405         ret = intel_initial_commit(dev);
15406         if (ret)
15407                 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15408
15409         return 0;
15410 }
15411
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 configuration.
 * Used by the "pipe must stay on" quirk handling.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* The chosen dividers should yield (nearly) the VGA dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from the current DPLL value. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Standard 640x480@60 CRT timings. */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe is actually scanning out before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15481
/* Force-disable a pipe on i830 (counterpart of i830_enable_pipe()). */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* All planes and cursors are expected to be off already. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Only shut the DPLL down once the pipe has stopped scanning. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15503
/*
 * On pre-gen4 hardware a primary plane can end up attached to a pipe other
 * than the one its CRTC owns (e.g. as left by the BIOS); disable any such
 * misrouted plane.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Only pre-gen4 needs this fixup. */
	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		/* Disable relative to the pipe the plane is actually on. */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
15531
15532 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15533 {
15534         struct drm_device *dev = crtc->base.dev;
15535         struct intel_encoder *encoder;
15536
15537         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15538                 return true;
15539
15540         return false;
15541 }
15542
15543 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15544 {
15545         struct drm_device *dev = encoder->base.dev;
15546         struct intel_connector *connector;
15547
15548         for_each_connector_on_encoder(dev, &encoder->base, connector)
15549                 return connector;
15550
15551         return NULL;
15552 }
15553
15554 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15555                               enum pipe pch_transcoder)
15556 {
15557         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15558                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15559 }
15560
/*
 * Bring a CRTC whose state was read out from the BIOS in line with the
 * driver's expectations: clear debug frame-start delays, turn off all
 * non-primary planes, disable the pipe if it has no active encoders, and
 * mark FIFO underrun reporting as disabled on it.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc->active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15623
/*
 * Fix up an encoder whose readout state is inconsistent: an encoder with
 * active connectors but no active pipe is manually disabled and its
 * connector/crtc links are clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15667
15668 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15669 {
15670         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15671
15672         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15673                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15674                 i915_disable_vga(dev_priv);
15675         }
15676 }
15677
/* Power-well-safe wrapper around i915_redisable_vga_power_on(). */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Balance the reference taken by get_if_enabled() above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15694
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	/* Read back each plane's enable state and the pipe it scans out on. */
	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;	/* default; get_hw_state() may overwrite */
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Record visibility on the CRTC the plane is actually on. */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/* Make each CRTC's active-plane bookkeeping match what we read. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
15727
/*
 * Read the current hardware modeset state back into the software state
 * structures: CRTCs, planes, shared DPLLs, encoders and connectors are
 * all reconstructed from what the registers report, replacing whatever
 * software state existed before.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Wipe any stale software state before reading out. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		/* active/enable both track whether the pipe is running. */
		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	/*
	 * Read out each shared DPLL's hw state and rebuild the mask of
	 * active crtcs referencing it.
	 */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			/* Active encoder: link it to the crtc driving it. */
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining per-crtc software state for active pipes. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc);
		}

		/* Inactive pipes contribute a zero cdclk requirement. */
		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15880
15881 static void
15882 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15883 {
15884         struct intel_encoder *encoder;
15885
15886         for_each_intel_encoder(&dev_priv->drm, encoder) {
15887                 u64 get_domains;
15888                 enum intel_display_power_domain domain;
15889                 struct intel_crtc_state *crtc_state;
15890
15891                 if (!encoder->get_power_domains)
15892                         continue;
15893
15894                 /*
15895                  * MST-primary and inactive encoders don't have a crtc state
15896                  * and neither of these require any power domain references.
15897                  */
15898                 if (!encoder->base.crtc)
15899                         continue;
15900
15901                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
15902                 get_domains = encoder->get_power_domains(encoder, crtc_state);
15903                 for_each_power_domain(domain, get_domains)
15904                         intel_display_power_get(dev_priv, domain);
15905         }
15906 }
15907
15908 static void intel_early_display_was(struct drm_i915_private *dev_priv)
15909 {
15910         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15911         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15912                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15913                            DARBF_GATING_DIS);
15914
15915         if (IS_HASWELL(dev_priv)) {
15916                 /*
15917                  * WaRsPkgCStateDisplayPMReq:hsw
15918                  * System hang if this isn't done before disabling all planes!
15919                  */
15920                 I915_WRITE(CHICKEN_PAR1_1,
15921                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15922         }
15923 }
15924
/*
 * Scan out the current hw modeset state,
 * and sanitizes it to the current state: read everything back from
 * the hardware, then fix up anything the BIOS/GOP left in a
 * configuration we can't handle.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* Keep all hardware powered while we poke at registers. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		drm_crtc_vblank_reset(&crtc->base);

		if (crtc->active)
			drm_crtc_vblank_on(&crtc->base);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any DPLL that is on but no longer used by any pipe. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and on some platforms sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		/*
		 * After sanitizing, no crtc should need further power-domain
		 * fixups; a non-zero result here is unexpected (hence the
		 * WARN_ON), but drop the references anyway to stay balanced.
		 */
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_fbc_init_pipe_state(dev_priv);
}
16006
16007 void intel_display_resume(struct drm_device *dev)
16008 {
16009         struct drm_i915_private *dev_priv = to_i915(dev);
16010         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16011         struct drm_modeset_acquire_ctx ctx;
16012         int ret;
16013
16014         dev_priv->modeset_restore_state = NULL;
16015         if (state)
16016                 state->acquire_ctx = &ctx;
16017
16018         drm_modeset_acquire_init(&ctx, 0);
16019
16020         while (1) {
16021                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16022                 if (ret != -EDEADLK)
16023                         break;
16024
16025                 drm_modeset_backoff(&ctx);
16026         }
16027
16028         if (!ret)
16029                 ret = __intel_display_resume(dev, state, &ctx);
16030
16031         intel_enable_ipc(dev_priv);
16032         drm_modeset_drop_locks(&ctx);
16033         drm_modeset_acquire_fini(&ctx);
16034
16035         if (ret)
16036                 DRM_ERROR("Restoring old state failed with %i\n", ret);
16037         if (state)
16038                 drm_atomic_state_put(state);
16039 }
16040
/*
 * Late connector registration hook: register the backlight device for
 * this connector. Returns 0 on success or the backlight registration
 * error code.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/*
	 * Only one registration step at the moment, so the result can be
	 * propagated directly (0 on success, error code otherwise).
	 */
	return intel_backlight_device_register(intel_connector);
}
16055
/* Undo intel_connector_register(): tear down the backlight state. */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
16063
16064 static void intel_hpd_poll_fini(struct drm_device *dev)
16065 {
16066         struct intel_connector *connector;
16067         struct drm_connector_list_iter conn_iter;
16068
16069         /* Kill all the work that may have been queued by hpd. */
16070         drm_connector_list_iter_begin(dev, &conn_iter);
16071         for_each_intel_connector_iter(connector, &conn_iter) {
16072                 if (connector->modeset_retry_work.func)
16073                         cancel_work_sync(&connector->modeset_retry_work);
16074                 if (connector->hdcp_shim) {
16075                         cancel_delayed_work_sync(&connector->hdcp_check_work);
16076                         cancel_work_sync(&connector->hdcp_prop_work);
16077                 }
16078         }
16079         drm_connector_list_iter_end(&conn_iter);
16080 }
16081
/*
 * Tear down all modeset state. The ordering below is load-bearing;
 * see the per-step comments.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Finish any commits still pending on the modeset workqueue. */
	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_teardown_gmbus(dev_priv);

	/* All work was flushed above, so the workqueue can go now. */
	destroy_workqueue(dev_priv->modeset_wq);
}
16122
/*
 * Link @encoder to @connector, both in our own bookkeeping
 * (connector->encoder) and in the drm core's connector/encoder mapping.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base, &encoder->base);
}
16129
16130 /*
16131  * set vga decode state - true == enable VGA decode
16132  */
16133 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16134 {
16135         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16136         u16 gmch_ctrl;
16137
16138         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16139                 DRM_ERROR("failed to read control word\n");
16140                 return -EIO;
16141         }
16142
16143         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16144                 return 0;
16145
16146         if (state)
16147                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16148         else
16149                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16150
16151         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16152                 DRM_ERROR("failed to write control word\n");
16153                 return -EIO;
16154         }
16155
16156         return 0;
16157 }
16158
16159 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16160
/*
 * Snapshot of display-related registers, captured by
 * intel_display_capture_error_state() when a GPU error is recorded and
 * pretty-printed later by intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2, captured on hsw/bdw only */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never filled in by the capture code */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT, gmch platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE, gen <= 3 only */
		u32 pos;	/* DSPPOS, gen <= 3 only */
		u32 addr;	/* DSPADDR, gen <= 7 except hsw */
		u32 surface;	/* DSPSURF, gen >= 4 only */
		u32 tile_offset;	/* DSPTILEOFF, gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		/* Transcoder timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* A, B, C and (on DDI platforms) eDP */
};
16204
/*
 * Capture a snapshot of the display registers for inclusion in the GPU
 * error state. Uses GFP_ATOMIC for the allocation, so it is safe in
 * contexts that must not sleep. Returns NULL if there are no pipes or
 * the allocation fails; the caller owns (and must free) the result.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	/* Readout order must match the transcoder[] layout in the struct. */
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip powered-down pipes; reading them could hang. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* Some plane registers only exist on certain generations. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		/* Same rationale as above: never touch a dead power domain. */
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
16284
16285 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16286
16287 void
16288 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16289                                 struct intel_display_error_state *error)
16290 {
16291         struct drm_i915_private *dev_priv = m->i915;
16292         int i;
16293
16294         if (!error)
16295                 return;
16296
16297         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16298         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16299                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16300                            error->power_well_driver);
16301         for_each_pipe(dev_priv, i) {
16302                 err_printf(m, "Pipe [%d]:\n", i);
16303                 err_printf(m, "  Power: %s\n",
16304                            onoff(error->pipe[i].power_domain_on));
16305                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16306                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16307
16308                 err_printf(m, "Plane [%d]:\n", i);
16309                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16310                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16311                 if (INTEL_GEN(dev_priv) <= 3) {
16312                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16313                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16314                 }
16315                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16316                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16317                 if (INTEL_GEN(dev_priv) >= 4) {
16318                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16319                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16320                 }
16321
16322                 err_printf(m, "Cursor [%d]:\n", i);
16323                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16324                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16325                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16326         }
16327
16328         for (i = 0; i < error->num_transcoders; i++) {
16329                 err_printf(m, "CPU transcoder: %s\n",
16330                            transcoder_name(error->transcoder[i].cpu_transcoder));
16331                 err_printf(m, "  Power: %s\n",
16332                            onoff(error->transcoder[i].power_domain_on));
16333                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16334                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16335                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16336                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16337                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16338                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16339                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16340         }
16341 }
16342
16343 #endif