/*
 * drivers/gpu/drm/i915/intel_display.c
 * (tree snapshot taken at commit "drm/i915: Refactor icl_is_hdr_plane")
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/reservation.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45 #include <drm/i915_drm.h>
46
47 #include "i915_drv.h"
48 #include "i915_gem_clflush.h"
49 #include "i915_trace.h"
50 #include "intel_drv.h"
51 #include "intel_dsi.h"
52 #include "intel_frontbuffer.h"
53
54 #include "intel_drv.h"
55 #include "intel_dsi.h"
56 #include "intel_frontbuffer.h"
57
58 #include "i915_drv.h"
59 #include "i915_gem_clflush.h"
60 #include "i915_reset.h"
61 #include "i915_trace.h"
62
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Framebuffer tiling modes accepted by pre-gen9 primary planes. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* terminator */
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only accept linear (untiled) buffers. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* terminator */
};
96
/* Forward declarations — the definitions appear later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
/*
 * Valid divider ranges for a DPLL configuration on a given platform/output.
 * The find_best_dpll() helpers search within these bounds.
 */
struct intel_limit {
	/* inclusive [min, max] range for each clock/divider value */
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;		/* dot clock threshold selecting slow vs fast p2 */
		int p2_slow, p2_fast;	/* p2 divider below/above dot_limit */
	} p2;
};
139
140 /* returns HPLL frequency in kHz */
141 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
142 {
143         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
144
145         /* Obtain SKU information */
146         mutex_lock(&dev_priv->sb_lock);
147         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
148                 CCK_FUSE_HPLL_FREQ_MASK;
149         mutex_unlock(&dev_priv->sb_lock);
150
151         return vco_freq[hpll_freq] * 1000;
152 }
153
154 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
155                       const char *name, u32 reg, int ref_freq)
156 {
157         u32 val;
158         int divider;
159
160         mutex_lock(&dev_priv->sb_lock);
161         val = vlv_cck_read(dev_priv, reg);
162         mutex_unlock(&dev_priv->sb_lock);
163
164         divider = val & CCK_FREQUENCY_VALUES;
165
166         WARN((val & CCK_FREQUENCY_STATUS) !=
167              (divider << CCK_FREQUENCY_STATUS_SHIFT),
168              "%s change in progress\n", name);
169
170         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
171 }
172
173 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
174                            const char *name, u32 reg)
175 {
176         if (dev_priv->hpll_freq == 0)
177                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
178
179         return vlv_get_cck_clock(dev_priv, name, reg,
180                                  dev_priv->hpll_freq);
181 }
182
183 static void intel_update_czclk(struct drm_i915_private *dev_priv)
184 {
185         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
186                 return;
187
188         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
189                                                       CCK_CZ_CLOCK_CONTROL);
190
191         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
192 }
193
194 static inline u32 /* units of 100MHz */
195 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
196                     const struct intel_crtc_state *pipe_config)
197 {
198         if (HAS_DDI(dev_priv))
199                 return pipe_config->port_clock; /* SPLL */
200         else
201                 return dev_priv->fdi_pll_freq;
202 }
203
/* gen2 DPLL limits, DAC (VGA) output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DPLL limits, DVO output */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 DPLL limits, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/gen4 (i9xx) DPLL limits, SDVO/HDMI output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/gen4 (i9xx) DPLL limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
268
269
/* G4x DPLL limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x DPLL limits, HDMI output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x DPLL limits, single-channel LVDS (dot_limit 0 => always p2_slow) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x DPLL limits, dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview DPLL limits, SDVO output */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits, LVDS output */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
353
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake, single-channel LVDS */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake, dual-channel LVDS */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake, dual-channel LVDS with 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
424
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },	/* fast clock = 5x dot */
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 10.22 fixed point on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
468
469 static void
470 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
471 {
472         if (enable)
473                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
474                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
475         else
476                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
477                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
478                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
479 }
480
/* True when @state requires a full modeset (wrapper for the DRM helper). */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
486
487 /*
488  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
489  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
490  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
491  * The helpers' return value is the rate of the clock that is fed to the
492  * display engine's pipe which can be the above fast dot clock rate or a
493  * divided-down version of it.
494  */
495 /* m1 is reserved as 0 in Pineview, n is a ring counter */
496 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
497 {
498         clock->m = clock->m2 + 2;
499         clock->p = clock->p1 * clock->p2;
500         if (WARN_ON(clock->n == 0 || clock->p == 0))
501                 return 0;
502         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
503         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
504
505         return clock->dot;
506 }
507
508 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
509 {
510         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
511 }
512
513 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
514 {
515         clock->m = i9xx_dpll_compute_m(clock);
516         clock->p = clock->p1 * clock->p2;
517         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
518                 return 0;
519         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
520         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
521
522         return clock->dot;
523 }
524
525 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
526 {
527         clock->m = clock->m1 * clock->m2;
528         clock->p = clock->p1 * clock->p2;
529         if (WARN_ON(clock->n == 0 || clock->p == 0))
530                 return 0;
531         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
532         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
533
534         return clock->dot / 5;
535 }
536
537 int chv_calc_dpll_params(int refclk, struct dpll *clock)
538 {
539         clock->m = clock->m1 * clock->m2;
540         clock->p = clock->p1 * clock->p2;
541         if (WARN_ON(clock->n == 0 || clock->p == 0))
542                 return 0;
543         clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
544                                            clock->n << 22);
545         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
546
547         return clock->dot / 5;
548 }
549
550 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
551
552 /*
553  * Returns whether the given set of divisors are valid for a given refclk with
554  * the given connectors.
555  */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	/* NOTE: each INTELPllInvalid() below expands to "return false". */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 required except on platforms with a single m divider */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't define combined m/p ranges */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
592
593 static int
594 i9xx_select_p2_div(const struct intel_limit *limit,
595                    const struct intel_crtc_state *crtc_state,
596                    int target)
597 {
598         struct drm_device *dev = crtc_state->base.crtc->dev;
599
600         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
601                 /*
602                  * For LVDS just rely on its current settings for dual-channel.
603                  * We haven't figured out how to reliably set up different
604                  * single/dual channel state, if we even can.
605                  */
606                 if (intel_is_dual_link_lvds(dev))
607                         return limit->p2.p2_fast;
608                 else
609                         return limit->p2.p2_slow;
610         } else {
611                 if (target < limit->p2.dot_limit)
612                         return limit->p2.p2_slow;
613                 else
614                         return limit->p2.p2_fast;
615         }
616 }
617
618 /*
619  * Returns a set of divisors for the desired target clock with the given
620  * refclk, or FALSE.  The returned values represent the clock equation:
621  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
622  *
623  * Target and reference clocks are specified in kHz.
624  *
625  * If match_clock is provided, then best_clock P divider must match the P
626  * divider from @match_clock used for LVDS downclocking.
627  */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error so far; start at worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustive search over m1/m2/n/p1.  Iteration order matters:
	 * on a tie the first candidate found wins (strict '<' below).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* P must match for LVDS downclocking */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff some candidate beat the initial worst-case error */
	return (err != target);
}
675
676 /*
677  * Returns a set of divisors for the desired target clock with the given
678  * refclk, or FALSE.  The returned values represent the clock equation:
679  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
680  *
681  * Target and reference clocks are specified in kHz.
682  *
683  * If match_clock is provided, then best_clock P divider must match the P
684  * divider from @match_clock used for LVDS downclocking.
685  */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error so far; start at worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without the
	 * m1 > m2 constraint (Pineview has a single combined m divider).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* P must match for LVDS downclocking */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff some candidate beat the initial worst-case error */
	return (err != target);
}
731
732 /*
733  * Returns a set of divisors for the desired target clock with the given
734  * refclk, or FALSE.  The returned values represent the clock equation:
735  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
736  *
737  * Target and reference clocks are specified in kHz.
738  *
739  * If match_clock is provided, then best_clock P divider must match the P
740  * divider from @match_clock used for LVDS downclocking.
741  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Shrink the n search space so
						 * later iterations never pick a
						 * larger n than the best hit.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
790
791 /*
792  * Check if the calculated PLL configuration is more optimal compared to the
793  * best configuration and error found so far. Return the calculated error.
794  */
795 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
796                                const struct dpll *calculated_clock,
797                                const struct dpll *best_clock,
798                                unsigned int best_error_ppm,
799                                unsigned int *error_ppm)
800 {
801         /*
802          * For CHV ignore the error and consider only the P value.
803          * Prefer a bigger P value based on HW requirements.
804          */
805         if (IS_CHERRYVIEW(to_i915(dev))) {
806                 *error_ppm = 0;
807
808                 return calculated_clock->p > best_clock->p;
809         }
810
811         if (WARN_ON_ONCE(!target_freq))
812                 return false;
813
814         *error_ppm = div_u64(1000000ULL *
815                                 abs(target_freq - calculated_clock->dot),
816                              target_freq);
817         /*
818          * Prefer a better P value over a better (smaller) error if the error
819          * is small. Ensure this preference for future configurations too by
820          * setting the error to 0.
821          */
822         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
823                 *error_ppm = 0;
824
825                 return true;
826         }
827
828         return *error_ppm + 10 < best_error_ppm;
829 }
830
831 /*
832  * Returns a set of divisors for the desired target clock with the given
833  * refclk, or FALSE.  The returned values represent the clock equation:
834  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
835  */
836 static bool
837 vlv_find_best_dpll(const struct intel_limit *limit,
838                    struct intel_crtc_state *crtc_state,
839                    int target, int refclk, struct dpll *match_clock,
840                    struct dpll *best_clock)
841 {
842         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
843         struct drm_device *dev = crtc->base.dev;
844         struct dpll clock;
845         unsigned int bestppm = 1000000;
846         /* min update 19.2 MHz */
847         int max_n = min(limit->n.max, refclk / 19200);
848         bool found = false;
849
850         target *= 5; /* fast clock */
851
852         memset(best_clock, 0, sizeof(*best_clock));
853
854         /* based on hardware requirement, prefer smaller n to precision */
855         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
856                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
857                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
858                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859                                 clock.p = clock.p1 * clock.p2;
860                                 /* based on hardware requirement, prefer bigger m1,m2 values */
861                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
862                                         unsigned int ppm;
863
864                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
865                                                                      refclk * clock.m1);
866
867                                         vlv_calc_dpll_params(refclk, &clock);
868
869                                         if (!intel_PLL_is_valid(to_i915(dev),
870                                                                 limit,
871                                                                 &clock))
872                                                 continue;
873
874                                         if (!vlv_PLL_is_optimal(dev, target,
875                                                                 &clock,
876                                                                 best_clock,
877                                                                 bestppm, &ppm))
878                                                 continue;
879
880                                         *best_clock = clock;
881                                         bestppm = ppm;
882                                         found = true;
883                                 }
884                         }
885                 }
886         }
887
888         return found;
889 }
890
891 /*
892  * Returns a set of divisors for the desired target clock with the given
893  * refclk, or FALSE.  The returned values represent the clock equation:
894  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
895  */
896 static bool
897 chv_find_best_dpll(const struct intel_limit *limit,
898                    struct intel_crtc_state *crtc_state,
899                    int target, int refclk, struct dpll *match_clock,
900                    struct dpll *best_clock)
901 {
902         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
903         struct drm_device *dev = crtc->base.dev;
904         unsigned int best_error_ppm;
905         struct dpll clock;
906         u64 m2;
907         int found = false;
908
909         memset(best_clock, 0, sizeof(*best_clock));
910         best_error_ppm = 1000000;
911
912         /*
913          * Based on hardware doc, the n always set to 1, and m1 always
914          * set to 2.  If requires to support 200Mhz refclk, we need to
915          * revisit this because n may not 1 anymore.
916          */
917         clock.n = 1, clock.m1 = 2;
918         target *= 5;    /* fast clock */
919
920         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
921                 for (clock.p2 = limit->p2.p2_fast;
922                                 clock.p2 >= limit->p2.p2_slow;
923                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
924                         unsigned int error_ppm;
925
926                         clock.p = clock.p1 * clock.p2;
927
928                         m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
929                                         clock.n) << 22, refclk * clock.m1);
930
931                         if (m2 > INT_MAX/clock.m1)
932                                 continue;
933
934                         clock.m2 = m2;
935
936                         chv_calc_dpll_params(refclk, &clock);
937
938                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
939                                 continue;
940
941                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
942                                                 best_error_ppm, &error_ppm))
943                                 continue;
944
945                         *best_clock = clock;
946                         best_error_ppm = error_ppm;
947                         found = true;
948                 }
949         }
950
951         return found;
952 }
953
954 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
955                         struct dpll *best_clock)
956 {
957         int refclk = 100000;
958         const struct intel_limit *limit = &intel_limits_bxt;
959
960         return chv_find_best_dpll(limit, crtc_state,
961                                   target_clock, refclk, NULL, best_clock);
962 }
963
964 bool intel_crtc_active(struct intel_crtc *crtc)
965 {
966         /* Be paranoid as we can arrive here with only partial
967          * state retrieved from the hardware during setup.
968          *
969          * We can ditch the adjusted_mode.crtc_clock check as soon
970          * as Haswell has gained clock readout/fastboot support.
971          *
972          * We can ditch the crtc->primary->state->fb check as soon as we can
973          * properly reconstruct framebuffers.
974          *
975          * FIXME: The intel_crtc->active here should be switched to
976          * crtc->state->active once we have proper CRTC states wired up
977          * for atomic.
978          */
979         return crtc->active && crtc->base.primary->state->fb &&
980                 crtc->config->base.adjusted_mode.crtc_clock;
981 }
982
983 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
984                                              enum pipe pipe)
985 {
986         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
987
988         return crtc->config->cpu_transcoder;
989 }
990
991 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
992                                     enum pipe pipe)
993 {
994         i915_reg_t reg = PIPEDSL(pipe);
995         u32 line1, line2;
996         u32 line_mask;
997
998         if (IS_GEN(dev_priv, 2))
999                 line_mask = DSL_LINEMASK_GEN2;
1000         else
1001                 line_mask = DSL_LINEMASK_GEN3;
1002
1003         line1 = I915_READ(reg) & line_mask;
1004         msleep(5);
1005         line2 = I915_READ(reg) & line_mask;
1006
1007         return line1 != line2;
1008 }
1009
/*
 * Poll (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or has stopped (@state == false); log an error
 * on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1020
/* Wait until the pipe's scanline counter has stopped moving. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1025
/* Wait until the pipe's scanline counter has started moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1030
/*
 * Wait for the pipe to fully turn off after it was disabled.
 * Gen4+ exposes a pipe state bit in PIPECONF to poll; older hardware
 * has no such bit, so fall back to watching the scanline counter stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(dev_priv,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1050
1051 /* Only for pre-ILK configs */
1052 void assert_pll(struct drm_i915_private *dev_priv,
1053                 enum pipe pipe, bool state)
1054 {
1055         u32 val;
1056         bool cur_state;
1057
1058         val = I915_READ(DPLL(pipe));
1059         cur_state = !!(val & DPLL_VCO_ENABLE);
1060         I915_STATE_WARN(cur_state != state,
1061              "PLL state assertion failure (expected %s, current %s)\n",
1062                         onoff(state), onoff(cur_state));
1063 }
1064
/* Warn if the DSI PLL enable state doesn't match @state. */
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* CCK registers are accessed over the sideband, hence the lock */
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        mutex_unlock(&dev_priv->sb_lock);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1080
/*
 * Assert that the FDI TX enable state for @pipe matches @state.
 * DDI platforms have no dedicated FDI_TX register, so the transcoder's
 * DDI function control enable bit is checked instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1102
1103 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1104                           enum pipe pipe, bool state)
1105 {
1106         u32 val;
1107         bool cur_state;
1108
1109         val = I915_READ(FDI_RX_CTL(pipe));
1110         cur_state = !!(val & FDI_RX_ENABLE);
1111         I915_STATE_WARN(cur_state != state,
1112              "FDI RX state assertion failure (expected %s, current %s)\n",
1113                         onoff(state), onoff(cur_state));
1114 }
1115 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1116 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1117
/*
 * Warn if the FDI TX PLL is not enabled on platforms where software
 * controls it. Skipped where the check doesn't apply (see below).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1134
1135 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1136                        enum pipe pipe, bool state)
1137 {
1138         u32 val;
1139         bool cur_state;
1140
1141         val = I915_READ(FDI_RX_CTL(pipe));
1142         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1143         I915_STATE_WARN(cur_state != state,
1144              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1145                         onoff(state), onoff(cur_state));
1146 }
1147
/*
 * Warn if the panel power sequencer driving @pipe still has its
 * registers locked (or panel power off), which would make DPLL/panel
 * register writes ineffective. The PPS register and the pipe it drives
 * are located differently per platform, hence the three branches below.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms are not expected to reach this check */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Map the PPS port-select field to the port's current pipe */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Pre-PCH-split hardware: panel is expected to be on LVDS */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        /* Unlocked means panel power off, or the unlock magic is written */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1204
/*
 * Warn if the pipe enable state doesn't match @state. Takes a power
 * domain reference so the PIPECONF read is safe; a powered-down
 * transcoder is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                /* Power well off -> the pipe cannot be enabled */
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1233
/*
 * Warn if the hardware enable state of @plane (queried via its
 * get_hw_state vfunc) doesn't match @state.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1248
1249 static void assert_planes_disabled(struct intel_crtc *crtc)
1250 {
1251         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1252         struct intel_plane *plane;
1253
1254         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1255                 assert_plane_disabled(plane);
1256 }
1257
/*
 * Warn if vblank interrupts can still be enabled on @crtc.
 * drm_crtc_vblank_get() returning 0 means a vblank reference was
 * successfully taken (i.e. vblanks are not off as expected), so warn
 * and drop the reference we just acquired.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1263
1264 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1265                                     enum pipe pipe)
1266 {
1267         u32 val;
1268         bool enabled;
1269
1270         val = I915_READ(PCH_TRANSCONF(pipe));
1271         enabled = !!(val & TRANS_ENABLE);
1272         I915_STATE_WARN(enabled,
1273              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1274              pipe_name(pipe));
1275 }
1276
/*
 * Warn if the PCH DP port @port is enabled on @pipe's transcoder, and
 * additionally (IBX only) if a disabled port was left pointing at
 * transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1294
/*
 * Warn if the PCH HDMI port @port is enabled on @pipe's transcoder, and
 * additionally (IBX only) if a disabled port was left pointing at
 * transcoder B. Mirrors assert_pch_dp_disabled() for HDMI/SDVO.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1312
/*
 * Warn if any PCH output port (DP, VGA, LVDS, HDMI/SDVO) is still
 * enabled on @pipe's transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1337
/*
 * Program the VLV DPLL from the precomputed hw state and wait for it
 * to report lock. Caller is responsible for the pipe-disabled and
 * panel-unlocked preconditions (see vlv_enable_pll()).
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        /* settle before polling the lock bit */
        udelay(150);

        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1355
/*
 * Enable the VLV DPLL for @crtc. The PLL itself is only programmed when
 * the precomputed state actually requests the VCO to be enabled; DPLL_MD
 * is written unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1373
1374
/*
 * Program and lock the CHV DPLL: first enable the 10-bit clock via the
 * DPIO sideband, wait the required >100ns, then enable the PLL and poll
 * for lock. Caller handles the pipe/panel preconditions (see
 * chv_enable_pll()).
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1406
/*
 * Enable the CHV DPLL for @crtc and program DPLL_MD. Pipes B/C need the
 * WaPixelRepeatModeFixForC0 chicken-bit dance because their DPLLCMD
 * register is not functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                /* remember what we wrote, for the readout/verify path */
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1443
1444 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1445 {
1446         struct intel_crtc *crtc;
1447         int count = 0;
1448
1449         for_each_intel_crtc(&dev_priv->drm, crtc) {
1450                 count += crtc->base.state->active &&
1451                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1452         }
1453
1454         return count;
1455 }
1456
/*
 * Enable a pre-ILK (i9xx-class) DPLL. The write sequence below is
 * order-sensitive: VGA mode first, then the divisors, then (gen < 4)
 * a rewrite for the pixel multiplier, then three warm-up rewrites.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneously.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1516
/*
 * Disable a pre-ILK DPLL. On I830 the DVO 2x clock is torn down on both
 * PLLs when the last DVO pipe goes away, but the PLL itself is left
 * running (both pipes stay enabled on that platform).
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1543
/*
 * Disable the DPLL for @pipe on Valleyview.
 *
 * The enable bit is cleared by writing back only the reference-clock
 * and VGA-disable bits; the integrated CRI clock is kept running on
 * pipes other than A.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1559
/*
 * Disable the DPLL for @pipe on Cherryview.
 *
 * As on VLV, only the reference clocks (and the CRI clock for non-A
 * pipes) are left running.  Additionally the 10-bit clock to the
 * display controller is gated off via the DPIO sideband, which
 * requires holding sb_lock.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1585
/*
 * Wait until the PHY reports @dport as ready, then return.
 *
 * Port B and C readiness lives in DPLL(0) (port C's field is the same
 * mask shifted left by 4, hence the shift of @expected_mask), while
 * port D is reported in DPIO_PHY_STATUS.  A timeout only WARNs; the
 * caller is not informed of failure.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1618
1619 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1620 {
1621         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1622         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1623         enum pipe pipe = crtc->pipe;
1624         i915_reg_t reg;
1625         u32 val, pipeconf_val;
1626
1627         /* Make sure PCH DPLL is enabled */
1628         assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1629
1630         /* FDI must be feeding us bits for PCH ports */
1631         assert_fdi_tx_enabled(dev_priv, pipe);
1632         assert_fdi_rx_enabled(dev_priv, pipe);
1633
1634         if (HAS_PCH_CPT(dev_priv)) {
1635                 /* Workaround: Set the timing override bit before enabling the
1636                  * pch transcoder. */
1637                 reg = TRANS_CHICKEN2(pipe);
1638                 val = I915_READ(reg);
1639                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1640                 I915_WRITE(reg, val);
1641         }
1642
1643         reg = PCH_TRANSCONF(pipe);
1644         val = I915_READ(reg);
1645         pipeconf_val = I915_READ(PIPECONF(pipe));
1646
1647         if (HAS_PCH_IBX(dev_priv)) {
1648                 /*
1649                  * Make the BPC in transcoder be consistent with
1650                  * that in pipeconf reg. For HDMI we must use 8bpc
1651                  * here for both 8bpc and 12bpc.
1652                  */
1653                 val &= ~PIPECONF_BPC_MASK;
1654                 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1655                         val |= PIPECONF_8BPC;
1656                 else
1657                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1658         }
1659
1660         val &= ~TRANS_INTERLACE_MASK;
1661         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1662                 if (HAS_PCH_IBX(dev_priv) &&
1663                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1664                         val |= TRANS_LEGACY_INTERLACED_ILK;
1665                 else
1666                         val |= TRANS_INTERLACED;
1667         else
1668                 val |= TRANS_PROGRESSIVE;
1669
1670         I915_WRITE(reg, val | TRANS_ENABLE);
1671         if (intel_wait_for_register(dev_priv,
1672                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1673                                     100))
1674                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1675 }
1676
/*
 * Enable the (single) LPT PCH transcoder.
 *
 * LPT's PCH transcoder is hardwired to pipe A's FDI RX, while FDI TX
 * follows @cpu_transcoder.  The timing-override chicken bit is set as
 * a workaround before enabling, and the interlace mode is copied from
 * the CPU transcoder's PIPECONF.  Logs an error on enable timeout.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1708
/*
 * Disable the PCH transcoder for @pipe (ILK-style PCH).
 *
 * FDI and the PCH ports must already be off (asserted).  After
 * clearing the enable bit we wait for the state bit to drop, then on
 * CPT clear the timing-override chicken bit that was set at enable
 * time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1740
/*
 * Disable the (single) LPT PCH transcoder and clear the
 * timing-override chicken bit set by lpt_enable_pch_transcoder().
 * Logs an error if the transcoder fails to report disabled in time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1759
1760 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1761 {
1762         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1763
1764         if (HAS_PCH_LPT(dev_priv))
1765                 return PIPE_A;
1766         else
1767                 return crtc->pipe;
1768 }
1769
1770 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1771 {
1772         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1773
1774         /*
1775          * On i965gm the hardware frame counter reads
1776          * zero when the TV encoder is enabled :(
1777          */
1778         if (IS_I965GM(dev_priv) &&
1779             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1780                 return 0;
1781
1782         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1783                 return 0xffffffff; /* full 32 bit counter */
1784         else if (INTEL_GEN(dev_priv) >= 3)
1785                 return 0xffffff; /* only 24 bits of frame count */
1786         else
1787                 return 0; /* Gen2 doesn't have a hardware frame counter */
1788 }
1789
1790 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1791 {
1792         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1793
1794         drm_crtc_set_max_vblank_count(&crtc->base,
1795                                       intel_crtc_max_vblank_count(crtc_state));
1796         drm_crtc_vblank_on(&crtc->base);
1797 }
1798
/*
 * Enable the CPU pipe/transcoder for @new_crtc_state.
 *
 * Planes must already be disabled (asserted).  On GMCH platforms the
 * relevant (DSI or pipe) PLL must be running; on PCH setups the FDI
 * PLLs must be running when driving a PCH encoder.  If the pipe is
 * already enabled this is a no-op (expected only on i830, which keeps
 * both pipes enabled).
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1854
/*
 * Disable the CPU pipe/transcoder for @old_crtc_state.
 *
 * Planes must be off first (asserted).  Double-wide mode is always
 * cleared, but the enable bit is left set on i830 (both pipes must
 * stay running there).  When the pipe is actually being disabled we
 * wait for it to stop before returning.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1892
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1897
1898 static unsigned int
1899 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1900 {
1901         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1902         unsigned int cpp = fb->format->cpp[color_plane];
1903
1904         switch (fb->modifier) {
1905         case DRM_FORMAT_MOD_LINEAR:
1906                 return cpp;
1907         case I915_FORMAT_MOD_X_TILED:
1908                 if (IS_GEN(dev_priv, 2))
1909                         return 128;
1910                 else
1911                         return 512;
1912         case I915_FORMAT_MOD_Y_TILED_CCS:
1913                 if (color_plane == 1)
1914                         return 128;
1915                 /* fall through */
1916         case I915_FORMAT_MOD_Y_TILED:
1917                 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1918                         return 128;
1919                 else
1920                         return 512;
1921         case I915_FORMAT_MOD_Yf_TILED_CCS:
1922                 if (color_plane == 1)
1923                         return 128;
1924                 /* fall through */
1925         case I915_FORMAT_MOD_Yf_TILED:
1926                 switch (cpp) {
1927                 case 1:
1928                         return 64;
1929                 case 2:
1930                 case 4:
1931                         return 128;
1932                 case 8:
1933                 case 16:
1934                         return 256;
1935                 default:
1936                         MISSING_CASE(cpp);
1937                         return cpp;
1938                 }
1939                 break;
1940         default:
1941                 MISSING_CASE(fb->modifier);
1942                 return cpp;
1943         }
1944 }
1945
1946 static unsigned int
1947 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1948 {
1949         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1950                 return 1;
1951         else
1952                 return intel_tile_size(to_i915(fb->dev)) /
1953                         intel_tile_width_bytes(fb, color_plane);
1954 }
1955
1956 /* Return the tile dimensions in pixel units */
1957 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1958                             unsigned int *tile_width,
1959                             unsigned int *tile_height)
1960 {
1961         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1962         unsigned int cpp = fb->format->cpp[color_plane];
1963
1964         *tile_width = tile_width_bytes / cpp;
1965         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1966 }
1967
/* Round @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1976
1977 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1978 {
1979         unsigned int size = 0;
1980         int i;
1981
1982         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1983                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1984
1985         return size;
1986 }
1987
1988 static void
1989 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1990                         const struct drm_framebuffer *fb,
1991                         unsigned int rotation)
1992 {
1993         view->type = I915_GGTT_VIEW_NORMAL;
1994         if (drm_rotation_90_or_270(rotation)) {
1995                 view->type = I915_GGTT_VIEW_ROTATED;
1996                 view->rotated = to_intel_framebuffer(fb)->rot_info;
1997         }
1998 }
1999
/* GGTT alignment required for cursor surfaces on a given platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2011
/* GGTT alignment required for linear scanout surfaces per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2024
2025 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2026                                          int color_plane)
2027 {
2028         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2029
2030         /* AUX_DIST needs only 4K alignment */
2031         if (color_plane == 1)
2032                 return 4096;
2033
2034         switch (fb->modifier) {
2035         case DRM_FORMAT_MOD_LINEAR:
2036                 return intel_linear_alignment(dev_priv);
2037         case I915_FORMAT_MOD_X_TILED:
2038                 if (INTEL_GEN(dev_priv) >= 9)
2039                         return 256 * 1024;
2040                 return 0;
2041         case I915_FORMAT_MOD_Y_TILED_CCS:
2042         case I915_FORMAT_MOD_Yf_TILED_CCS:
2043         case I915_FORMAT_MOD_Y_TILED:
2044         case I915_FORMAT_MOD_Yf_TILED:
2045                 return 1 * 1024 * 1024;
2046         default:
2047                 MISSING_CASE(fb->modifier);
2048                 return 0;
2049         }
2050 }
2051
2052 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2053 {
2054         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2055         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2056
2057         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2058 }
2059
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer object for scanout
 * @fb: framebuffer to pin
 * @view: GGTT view (normal or rotated) to pin through
 * @uses_fence: whether the consuming plane can make use of a fence
 * @out_flags: PLANE_HAS_FENCE is OR'ed in when a fence was installed
 *
 * Pins @fb's backing object into the GGTT at the alignment required by
 * the surface (bumped for the VT-d workaround), optionally installs a
 * fence, and takes an extra reference on the vma.  The caller must
 * hold struct_mutex and release the pin via intel_unpin_fb_vma().
 *
 * Returns the pinned vma or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 can't scan out without the fence: fail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* extra ref for the caller; skipped on the error paths above */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv, wakeref);
	return vma;
}
2151
/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence (when one was
 * installed, per PLANE_HAS_FENCE in @flags), unpin from the display
 * plane and release the vma reference.  struct_mutex must be held.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2161
2162 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2163                           unsigned int rotation)
2164 {
2165         if (drm_rotation_90_or_270(rotation))
2166                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2167         else
2168                 return fb->pitches[color_plane];
2169 }
2170
2171 /*
2172  * Convert the x/y offsets into a linear offset.
2173  * Only valid with 0/180 degree rotation, which is fine since linear
2174  * offset is only used with linear buffers on pre-hsw and tiled buffers
2175  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2176  */
2177 u32 intel_fb_xy_to_linear(int x, int y,
2178                           const struct intel_plane_state *state,
2179                           int color_plane)
2180 {
2181         const struct drm_framebuffer *fb = state->base.fb;
2182         unsigned int cpp = fb->format->cpp[color_plane];
2183         unsigned int pitch = state->color_plane[color_plane].stride;
2184
2185         return y * pitch + x * cpp;
2186 }
2187
2188 /*
2189  * Add the x/y offsets derived from fb->offsets[] to the user
2190  * specified plane src x/y offsets. The resulting x/y offsets
2191  * specify the start of scanout from the beginning of the gtt mapping.
2192  */
2193 void intel_add_fb_offsets(int *x, int *y,
2194                           const struct intel_plane_state *state,
2195                           int color_plane)
2196
2197 {
2198         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2199         unsigned int rotation = state->base.rotation;
2200
2201         if (drm_rotation_90_or_270(rotation)) {
2202                 *x += intel_fb->rotated[color_plane].x;
2203                 *y += intel_fb->rotated[color_plane].y;
2204         } else {
2205                 *x += intel_fb->normal[color_plane].x;
2206                 *y += intel_fb->normal[color_plane].y;
2207         }
2208 }
2209
2210 static u32 intel_adjust_tile_offset(int *x, int *y,
2211                                     unsigned int tile_width,
2212                                     unsigned int tile_height,
2213                                     unsigned int tile_size,
2214                                     unsigned int pitch_tiles,
2215                                     u32 old_offset,
2216                                     u32 new_offset)
2217 {
2218         unsigned int pitch_pixels = pitch_tiles * tile_width;
2219         unsigned int tiles;
2220
2221         WARN_ON(old_offset & (tile_size - 1));
2222         WARN_ON(new_offset & (tile_size - 1));
2223         WARN_ON(new_offset > old_offset);
2224
2225         tiles = (old_offset - new_offset) / tile_size;
2226
2227         *y += tiles / pitch_tiles * tile_height;
2228         *x += tiles % pitch_tiles * tile_width;
2229
2230         /* minimize x in case it got needlessly big */
2231         *y += *x / pitch_pixels * tile_height;
2232         *x %= pitch_pixels;
2233
2234         return new_offset;
2235 }
2236
/*
 * Whether this fb modifier lays the surface out linearly (untiled).
 * @color_plane is currently unused -- presumably reserved for
 * modifiers whose linearity differs per plane; confirm if such a
 * modifier is added.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2241
/*
 * Re-express a surface offset change as x/y pixel offsets.
 *
 * @new_offset must not be larger than @old_offset.  For tiled
 * surfaces the (tile-aligned) delta is folded into x/y via
 * intel_adjust_tile_offset(); for linear surfaces plain pitch/cpp
 * arithmetic is used.  Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* 90/270: pitch is a vertical stride, in tile_height units */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: convert the byte delta straight to rows + pixels */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2280
2281 /*
2282  * Adjust the tile offset by moving the difference into
2283  * the x/y offsets.
2284  */
2285 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2286                                              const struct intel_plane_state *state,
2287                                              int color_plane,
2288                                              u32 old_offset, u32 new_offset)
2289 {
2290         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2291                                            state->base.rotation,
2292                                            state->color_plane[color_plane].stride,
2293                                            old_offset, new_offset);
2294 }
2295
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* turn the power-of-two alignment into a low-bits mask */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* 90/270: pitch is a vertical stride, in tile_height units */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* reduce x/y to intra-tile coordinates ... */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* ... and fold whole tiles into the byte offset */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* push the sub-alignment remainder back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* remainder below the alignment becomes the new x/y */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2360
2361 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2362                                               const struct intel_plane_state *state,
2363                                               int color_plane)
2364 {
2365         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2366         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2367         const struct drm_framebuffer *fb = state->base.fb;
2368         unsigned int rotation = state->base.rotation;
2369         int pitch = state->color_plane[color_plane].stride;
2370         u32 alignment;
2371
2372         if (intel_plane->id == PLANE_CURSOR)
2373                 alignment = intel_cursor_alignment(dev_priv);
2374         else
2375                 alignment = intel_surf_alignment(fb, color_plane);
2376
2377         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2378                                             pitch, rotation, alignment);
2379 }
2380
2381 /* Convert the fb->offset[] into x/y offsets */
2382 static int intel_fb_offset_to_xy(int *x, int *y,
2383                                  const struct drm_framebuffer *fb,
2384                                  int color_plane)
2385 {
2386         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2387         unsigned int height;
2388
2389         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2390             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2391                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2392                               fb->offsets[color_plane], color_plane);
2393                 return -EINVAL;
2394         }
2395
2396         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2397         height = ALIGN(height, intel_tile_height(fb, color_plane));
2398
2399         /* Catch potential overflows early */
2400         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2401                             fb->offsets[color_plane])) {
2402                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2403                               fb->offsets[color_plane], fb->pitches[color_plane],
2404                               color_plane);
2405                 return -ERANGE;
2406         }
2407
2408         *x = 0;
2409         *y = 0;
2410
2411         intel_adjust_aligned_offset(x, y,
2412                                     fb, color_plane, DRM_MODE_ROTATE_0,
2413                                     fb->pitches[color_plane],
2414                                     fb->offsets[color_plane], 0);
2415
2416         return 0;
2417 }
2418
2419 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2420 {
2421         switch (fb_modifier) {
2422         case I915_FORMAT_MOD_X_TILED:
2423                 return I915_TILING_X;
2424         case I915_FORMAT_MOD_Y_TILED:
2425         case I915_FORMAT_MOD_Y_TILED_CCS:
2426                 return I915_TILING_Y;
2427         default:
2428                 return I915_TILING_NONE;
2429         }
2430 }
2431
2432 /*
2433  * From the Sky Lake PRM:
2434  * "The Color Control Surface (CCS) contains the compression status of
2435  *  the cache-line pairs. The compression state of the cache-line pair
2436  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2437  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2438  *  cache-line-pairs. CCS is always Y tiled."
2439  *
2440  * Since cache line pairs refers to horizontally adjacent cache lines,
2441  * each cache line in the CCS corresponds to an area of 32x16 cache
2442  * lines on the main surface. Since each pixel is 4 bytes, this gives
2443  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2444  * main surface.
2445  */
/*
 * Format descriptions for fbs with a CCS AUX plane: plane 1 is the CCS,
 * with hsub/vsub giving the 8x16 pixels-per-CCS-byte ratio derived above.
 */
static const struct drm_format_info ccs_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2452
2453 static const struct drm_format_info *
2454 lookup_format_info(const struct drm_format_info formats[],
2455                    int num_formats, u32 format)
2456 {
2457         int i;
2458
2459         for (i = 0; i < num_formats; i++) {
2460                 if (formats[i].format == format)
2461                         return &formats[i];
2462         }
2463
2464         return NULL;
2465 }
2466
2467 static const struct drm_format_info *
2468 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2469 {
2470         switch (cmd->modifier[0]) {
2471         case I915_FORMAT_MOD_Y_TILED_CCS:
2472         case I915_FORMAT_MOD_Yf_TILED_CCS:
2473                 return lookup_format_info(ccs_formats,
2474                                           ARRAY_SIZE(ccs_formats),
2475                                           cmd->pixel_format);
2476         default:
2477                 return NULL;
2478         }
2479 }
2480
2481 bool is_ccs_modifier(u64 modifier)
2482 {
2483         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2484                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 }
2486
/*
 * Validate the fb layout and fill in the derived per-plane information
 * (normal and rotated x/y offsets, rotation info for the rotated GTT
 * view), and check that the whole fb fits in the backing object.
 * Returns 0 on success or a negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Convert the byte offset of this plane into x/y coordinates. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                /* Plane 1 of a CCS fb is the AUX (compression status) plane. */
                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        /* Scale CCS tile dims to main-surface pixel units. */
                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Base tile offset in whole tiles; remainder goes into x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;

                if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        /* Pitch of the rotated view, in pixels (one tile column). */
                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear plane: size is just the byte extent in tiles. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        /* Reject fbs that extend past the end of the backing object. */
        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2651
2652 static int i9xx_format_to_fourcc(int format)
2653 {
2654         switch (format) {
2655         case DISPPLANE_8BPP:
2656                 return DRM_FORMAT_C8;
2657         case DISPPLANE_BGRX555:
2658                 return DRM_FORMAT_XRGB1555;
2659         case DISPPLANE_BGRX565:
2660                 return DRM_FORMAT_RGB565;
2661         default:
2662         case DISPPLANE_BGRX888:
2663                 return DRM_FORMAT_XRGB8888;
2664         case DISPPLANE_RGBX888:
2665                 return DRM_FORMAT_XBGR8888;
2666         case DISPPLANE_BGRX101010:
2667                 return DRM_FORMAT_XRGB2101010;
2668         case DISPPLANE_RGBX101010:
2669                 return DRM_FORMAT_XBGR2101010;
2670         }
2671 }
2672
2673 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2674 {
2675         switch (format) {
2676         case PLANE_CTL_FORMAT_RGB_565:
2677                 return DRM_FORMAT_RGB565;
2678         case PLANE_CTL_FORMAT_NV12:
2679                 return DRM_FORMAT_NV12;
2680         case PLANE_CTL_FORMAT_P010:
2681                 return DRM_FORMAT_P010;
2682         case PLANE_CTL_FORMAT_P012:
2683                 return DRM_FORMAT_P012;
2684         case PLANE_CTL_FORMAT_P016:
2685                 return DRM_FORMAT_P016;
2686         case PLANE_CTL_FORMAT_Y210:
2687                 return DRM_FORMAT_Y210;
2688         case PLANE_CTL_FORMAT_Y212:
2689                 return DRM_FORMAT_Y212;
2690         case PLANE_CTL_FORMAT_Y216:
2691                 return DRM_FORMAT_Y216;
2692         case PLANE_CTL_FORMAT_Y410:
2693                 return DRM_FORMAT_Y410;
2694         case PLANE_CTL_FORMAT_Y412:
2695                 return DRM_FORMAT_Y412;
2696         case PLANE_CTL_FORMAT_Y416:
2697                 return DRM_FORMAT_Y416;
2698         default:
2699         case PLANE_CTL_FORMAT_XRGB_8888:
2700                 if (rgb_order) {
2701                         if (alpha)
2702                                 return DRM_FORMAT_ABGR8888;
2703                         else
2704                                 return DRM_FORMAT_XBGR8888;
2705                 } else {
2706                         if (alpha)
2707                                 return DRM_FORMAT_ARGB8888;
2708                         else
2709                                 return DRM_FORMAT_XRGB8888;
2710                 }
2711         case PLANE_CTL_FORMAT_XRGB_2101010:
2712                 if (rgb_order)
2713                         return DRM_FORMAT_XBGR2101010;
2714                 else
2715                         return DRM_FORMAT_XRGB2101010;
2716         }
2717 }
2718
2719 static bool
2720 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2721                               struct intel_initial_plane_config *plane_config)
2722 {
2723         struct drm_device *dev = crtc->base.dev;
2724         struct drm_i915_private *dev_priv = to_i915(dev);
2725         struct drm_i915_gem_object *obj = NULL;
2726         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2727         struct drm_framebuffer *fb = &plane_config->fb->base;
2728         u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2729         u32 size_aligned = round_up(plane_config->base + plane_config->size,
2730                                     PAGE_SIZE);
2731
2732         size_aligned -= base_aligned;
2733
2734         if (plane_config->size == 0)
2735                 return false;
2736
2737         /* If the FB is too big, just don't use it since fbdev is not very
2738          * important and we should probably use that space with FBC or other
2739          * features. */
2740         if (size_aligned * 2 > dev_priv->stolen_usable_size)
2741                 return false;
2742
2743         switch (fb->modifier) {
2744         case DRM_FORMAT_MOD_LINEAR:
2745         case I915_FORMAT_MOD_X_TILED:
2746         case I915_FORMAT_MOD_Y_TILED:
2747                 break;
2748         default:
2749                 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2750                                  fb->modifier);
2751                 return false;
2752         }
2753
2754         mutex_lock(&dev->struct_mutex);
2755         obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2756                                                              base_aligned,
2757                                                              base_aligned,
2758                                                              size_aligned);
2759         mutex_unlock(&dev->struct_mutex);
2760         if (!obj)
2761                 return false;
2762
2763         switch (plane_config->tiling) {
2764         case I915_TILING_NONE:
2765                 break;
2766         case I915_TILING_X:
2767         case I915_TILING_Y:
2768                 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2769                 break;
2770         default:
2771                 MISSING_CASE(plane_config->tiling);
2772                 return false;
2773         }
2774
2775         mode_cmd.pixel_format = fb->format->format;
2776         mode_cmd.width = fb->width;
2777         mode_cmd.height = fb->height;
2778         mode_cmd.pitches[0] = fb->pitches[0];
2779         mode_cmd.modifier[0] = fb->modifier;
2780         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2781
2782         if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2783                 DRM_DEBUG_KMS("intel fb init failed\n");
2784                 goto out_unref_obj;
2785         }
2786
2787
2788         DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2789         return true;
2790
2791 out_unref_obj:
2792         i915_gem_object_put(obj);
2793         return false;
2794 }
2795
2796 static void
2797 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2798                         struct intel_plane_state *plane_state,
2799                         bool visible)
2800 {
2801         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2802
2803         plane_state->base.visible = visible;
2804
2805         if (visible)
2806                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2807         else
2808                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2809 }
2810
/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->base.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2827
/*
 * Disable a plane outside of an atomic commit, updating the software
 * crtc/plane state bookkeeping to match before touching the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        /* Keep visibility, plane_mask and active_planes consistent. */
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);

        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        trace_intel_disable_plane(&plane->base, crtc);
        plane->disable_plane(plane, crtc_state);
}
2849
/*
 * Take over the BIOS-programmed framebuffer for this crtc's primary
 * plane: wrap it in a GEM object if possible, otherwise try to share
 * an fb already reconstructed for another crtc at the same GTT base,
 * and as a last resort disable the primary plane.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* Allocation failed; the intel_framebuffer was never initialized. */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GTT base => the crtcs scan out the same memory. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->base.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->base.rotation);

        /* Pin the fb so it keeps scanning out without a modeset. */
        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        obj = intel_fb_obj(fb);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        /* Fill in the plane state to match the full-screen boot fb. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        /* Don't disturb the BIOS-configured swizzle for a tiled boot fb. */
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2958
/*
 * Maximum source plane width in pixels for the given fb modifier and
 * bytes-per-pixel. @rotation is currently unused here.
 * Unknown modifier/cpp combinations fall back to a conservative 2048.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
                switch (cpp) {
                case 8:
                        return 4096;
                case 4:
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* FIXME AUX plane? */
                /* fallthrough - CCS uses the same limits as plain Y/Yf */
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                switch (cpp) {
                case 8:
                        return 2048;
                case 4:
                        return 4096;
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
        }

        return 2048;
}
3004
/*
 * Try to make the AUX (CCS) plane's intra-tile x/y offsets match the
 * main surface's, by repeatedly stepping the AUX surface offset down
 * by one alignment unit and folding the difference into aux x/y.
 * Returns true (and updates color_plane[1]) when a match was found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /*
         * Keep searching while the AUX offset stays at or above the main
         * offset (the hw requires a non-negative AUX-to-main distance;
         * aux_y only grows as the offset shrinks, hence the y bound).
         */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                /* Matching intra-tile offsets found. */
                if (aux_x == main_x && aux_y == main_y)
                        break;

                /* Can't back off any further. */
                if (aux_offset == 0)
                        break;

                /* Work in AUX-plane units; restore the subsample remainder after. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3042
/*
 * Compute the x/y offsets and the aligned surface offset for the main
 * (Y/RGB) surface of a SKL+ plane.
 *
 * The surface offset is walked backwards, one alignment unit at a time,
 * until all hardware constraints hold:
 *  - it does not exceed the already-computed AUX surface offset
 *    (the hardware AUX offset is relative to the main one and must be
 *    non-negative),
 *  - x + w does not overflow the stride on X-tiled surfaces,
 *  - the CCS x/y coordinates line up with the main surface ones.
 *
 * Returns 0 on success, -EINVAL if the source is too large or no
 * suitable offset exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Step the offset back until x+w fits within the stride. */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                /* Gave up at offset 0 without a match -> no valid layout. */
                if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        return 0;
}
3119
/*
 * Compute the x/y offsets and the aligned surface offset for the CbCr
 * (AUX) plane of a planar YUV framebuffer.
 *
 * The source rect is in 16.16 fixed point and the chroma plane is
 * subsampled by 2 in both directions, hence the >> 17 (equivalent to
 * (>> 16) / 2).
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        int x = plane_state->base.src.x1 >> 17;
        int y = plane_state->base.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->base.src) >> 17;
        int h = drm_rect_height(&plane_state->base.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x;
        plane_state->color_plane[1].y = y;

        return 0;
}
3148
3149 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3150 {
3151         const struct drm_framebuffer *fb = plane_state->base.fb;
3152         int src_x = plane_state->base.src.x1 >> 16;
3153         int src_y = plane_state->base.src.y1 >> 16;
3154         int hsub = fb->format->hsub;
3155         int vsub = fb->format->vsub;
3156         int x = src_x / hsub;
3157         int y = src_y / vsub;
3158         u32 offset;
3159
3160         intel_add_fb_offsets(&x, &y, plane_state, 1);
3161         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3162
3163         plane_state->color_plane[1].offset = offset;
3164         plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3165         plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3166
3167         return 0;
3168 }
3169
/*
 * Compute the final surface offsets/coordinates for all color planes
 * of a SKL+ plane, including handling of the rotated GTT view.
 *
 * The AUX surface (CbCr for planar YUV, CCS for compressed fbs) is
 * set up before the main surface because the main surface placement
 * depends on the AUX offset.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int ret;

        intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
        plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
        plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

        ret = intel_plane_check_stride(plane_state);
        if (ret)
                return ret;

        /* Nothing more to compute for fully clipped planes. */
        if (!plane_state->base.visible)
                return 0;

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width << 16, fb->height << 16,
                                DRM_MODE_ROTATE_270);

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (is_planar_yuv_format(fb->format->format)) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface: fill the offset with an out-of-range
                 * poison value (presumably so accidental use is easy to
                 * spot — NOTE(review): confirm intent of ~0xfff).
                 */
                plane_state->color_plane[1].offset = ~0xfff;
                plane_state->color_plane[1].x = 0;
                plane_state->color_plane[1].y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3217
3218 unsigned int
3219 i9xx_plane_max_stride(struct intel_plane *plane,
3220                       u32 pixel_format, u64 modifier,
3221                       unsigned int rotation)
3222 {
3223         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3224
3225         if (!HAS_GMCH(dev_priv)) {
3226                 return 32*1024;
3227         } else if (INTEL_GEN(dev_priv) >= 4) {
3228                 if (modifier == I915_FORMAT_MOD_X_TILED)
3229                         return 16*1024;
3230                 else
3231                         return 32*1024;
3232         } else if (INTEL_GEN(dev_priv) >= 3) {
3233                 if (modifier == I915_FORMAT_MOD_X_TILED)
3234                         return 8*1024;
3235                 else
3236                         return 16*1024;
3237         } else {
3238                 if (plane->i9xx_plane == PLANE_C)
3239                         return 4*1024;
3240                 else
3241                         return 8*1024;
3242         }
3243 }
3244
3245 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3246 {
3247         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3249         u32 dspcntr = 0;
3250
3251         dspcntr |= DISPPLANE_GAMMA_ENABLE;
3252
3253         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3254                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3255
3256         if (INTEL_GEN(dev_priv) < 5)
3257                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3258
3259         return dspcntr;
3260 }
3261
/*
 * Build the DSPCNTR value for a pre-SKL primary plane from the plane
 * state: enable bit, pixel format, X-tiling and rotation/reflection.
 * The crtc-dependent bits are added separately by i9xx_plane_ctl_crtc().
 * Returns 0 (after logging) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE;

        if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
            IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        /* Map the fb fourcc to the hardware pixel format bits. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
                MISSING_CASE(fb->format->format);
                return 0;
        }

        /* DSPCNTR gained a tiling bit on gen4. */
        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
3316
/*
 * Compute the surface offset and x/y coordinates for a pre-SKL
 * primary plane. On gen4+ the offset is page-aligned via
 * intel_plane_compute_aligned_offset(); pre-gen4 has no surface
 * offset register so the offset stays 0 and the caller uses the
 * linear offset instead (see i9xx_update_plane()).
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int src_x = plane_state->base.src.x1 >> 16;
        int src_y = plane_state->base.src.y1 >> 16;
        u32 offset;
        int ret;

        intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
        plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

        ret = intel_plane_check_stride(plane_state);
        if (ret)
                return ret;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

        if (INTEL_GEN(dev_priv) >= 4)
                offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                            plane_state, 0);
        else
                offset = 0;

        /* HSW/BDW do this automagically in hardware */
        if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
                int src_w = drm_rect_width(&plane_state->base.src) >> 16;
                int src_h = drm_rect_height(&plane_state->base.src) >> 16;

                /*
                 * Adjust the start coordinates to the far corner for
                 * rotation/reflection.
                 */
                if (rotation & DRM_MODE_ROTATE_180) {
                        src_x += src_w - 1;
                        src_y += src_h - 1;
                } else if (rotation & DRM_MODE_REFLECT_X) {
                        src_x += src_w - 1;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
3362
/*
 * Atomic check for a pre-SKL primary plane: validate the rotation
 * (via chv_plane_check_rotation), clip the plane against the crtc
 * with scaling disallowed, validate the source coordinates, compute
 * the surface layout and finally precompute the DSPCNTR value used
 * at commit time.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
                 struct intel_plane_state *plane_state)
{
        int ret;

        ret = chv_plane_check_rotation(plane_state);
        if (ret)
                return ret;

        /* Primary planes cannot scale: min == max == 1:1. */
        ret = drm_atomic_helper_check_plane_state(&plane_state->base,
                                                  &crtc_state->base,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  false, true);
        if (ret)
                return ret;

        /* Fully clipped planes need no further checking. */
        if (!plane_state->base.visible)
                return 0;

        ret = intel_plane_check_src_coordinates(plane_state);
        if (ret)
                return ret;

        ret = i9xx_check_plane_surface(plane_state);
        if (ret)
                return ret;

        plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

        return 0;
}
3396
/*
 * Program a pre-SKL primary plane from the precomputed plane state
 * and enable it. All register writes happen under the uncore lock so
 * the update is as atomic as this hardware allows.
 */
static void i9xx_update_plane(struct intel_plane *plane,
                              const struct intel_crtc_state *crtc_state,
                              const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 linear_offset;
        int x = plane_state->color_plane[0].x;
        int y = plane_state->color_plane[0].y;
        unsigned long irqflags;
        u32 dspaddr_offset;
        u32 dspcntr;

        dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

        linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

        /* Pre-gen4 has no tiled surface offset; fold it into the address. */
        if (INTEL_GEN(dev_priv) >= 4)
                dspaddr_offset = plane_state->color_plane[0].offset;
        else
                dspaddr_offset = linear_offset;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

        if (INTEL_GEN(dev_priv) < 4) {
                /* pipesrc and dspsize control the size that is scaled from,
                 * which should always be the user's requested size.
                 */
                I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
                I915_WRITE_FW(DSPSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
        } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
                /* CHV pipe B primary plane uses the PRIM* registers. */
                I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
                I915_WRITE_FW(PRIMSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
                I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
        }

        /* HSW/BDW take a packed x/y offset; gen4+ take linear + tile x/y. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
                I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
        }

        /*
         * The control register self-arms if the plane was previously
         * disabled. Try to make the plane enable atomic by writing
         * the control register just before the surface register.
         */
        I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
        if (INTEL_GEN(dev_priv) >= 4)
                I915_WRITE_FW(DSPSURF(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
        else
                I915_WRITE_FW(DSPADDR(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3463
/*
 * Disable a pre-SKL primary plane while preserving the crtc-level
 * DSPCNTR bits that affect the pipe even with the plane off.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        unsigned long irqflags;
        u32 dspcntr;

        /*
         * DSPCNTR pipe gamma enable on g4x+ and pipe csc
         * enable on ilk+ affect the pipe bottom color as
         * well, so we must configure them even if the plane
         * is disabled.
         *
         * On pre-g4x there is no way to gamma correct the
         * pipe bottom color but we'll keep on doing this
         * anyway.
         */
        dspcntr = i9xx_plane_ctl_crtc(crtc_state);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
        /* Pre-gen4 uses DSPADDR instead of DSPSURF (cf. i9xx_update_plane). */
        if (INTEL_GEN(dev_priv) >= 4)
                I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
        else
                I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3494
/*
 * Read back from hardware whether the plane is currently enabled, and
 * which pipe it is attached to (stored in *pipe). Returns false (plane
 * treated as off) if the relevant power domain is not enabled.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
                                    enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-4 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = I915_READ(DSPCNTR(i9xx_plane));

        ret = val & DISPLAY_PLANE_ENABLE;

        /*
         * On ilk+ the plane<->pipe assignment is fixed; on older gens
         * read the pipe select bits from the control register.
         */
        if (INTEL_GEN(dev_priv) >= 5)
                *pipe = plane->pipe;
        else
                *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
3529
3530 static u32
3531 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3532 {
3533         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3534                 return 64;
3535         else
3536                 return intel_tile_width_bytes(fb, color_plane);
3537 }
3538
3539 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3540 {
3541         struct drm_device *dev = intel_crtc->base.dev;
3542         struct drm_i915_private *dev_priv = to_i915(dev);
3543
3544         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3545         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3546         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3547 }
3548
3549 /*
3550  * This function detaches (aka. unbinds) unused scalers in hardware
3551  */
3552 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3553 {
3554         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3555         const struct intel_crtc_scaler_state *scaler_state =
3556                 &crtc_state->scaler_state;
3557         int i;
3558
3559         /* loop through and disable scalers that aren't in use */
3560         for (i = 0; i < intel_crtc->num_scalers; i++) {
3561                 if (!scaler_state->scalers[i].in_use)
3562                         skl_detach_scaler(intel_crtc, i);
3563         }
3564 }
3565
3566 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3567                                           int color_plane, unsigned int rotation)
3568 {
3569         /*
3570          * The stride is either expressed as a multiple of 64 bytes chunks for
3571          * linear buffers or in number of tiles for tiled buffers.
3572          */
3573         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3574                 return 64;
3575         else if (drm_rotation_90_or_270(rotation))
3576                 return intel_tile_height(fb, color_plane);
3577         else
3578                 return intel_tile_width_bytes(fb, color_plane);
3579 }
3580
3581 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3582                      int color_plane)
3583 {
3584         const struct drm_framebuffer *fb = plane_state->base.fb;
3585         unsigned int rotation = plane_state->base.rotation;
3586         u32 stride = plane_state->color_plane[color_plane].stride;
3587
3588         if (color_plane >= fb->format->num_planes)
3589                 return 0;
3590
3591         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3592 }
3593
/*
 * Map a DRM fourcc to the SKL+ PLANE_CTL format (and RGB byte-order)
 * bits. Alpha/non-alpha fourcc pairs share the same format bits here;
 * blending is configured separately (see skl_plane_ctl_alpha()).
 * Unknown formats log a MISSING_CASE and return 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
        switch (pixel_format) {
        /* RGB formats */
        case DRM_FORMAT_C8:
                return PLANE_CTL_FORMAT_INDEXED;
        case DRM_FORMAT_RGB565:
                return PLANE_CTL_FORMAT_RGB_565;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                return PLANE_CTL_FORMAT_XRGB_8888;
        case DRM_FORMAT_XRGB2101010:
                return PLANE_CTL_FORMAT_XRGB_2101010;
        case DRM_FORMAT_XBGR2101010:
                return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
        /* Packed YUV 4:2:2 */
        case DRM_FORMAT_YUYV:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
        case DRM_FORMAT_YVYU:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
        case DRM_FORMAT_UYVY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
        case DRM_FORMAT_VYUY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
        /* Planar YUV */
        case DRM_FORMAT_NV12:
                return PLANE_CTL_FORMAT_NV12;
        case DRM_FORMAT_P010:
                return PLANE_CTL_FORMAT_P010;
        case DRM_FORMAT_P012:
                return PLANE_CTL_FORMAT_P012;
        case DRM_FORMAT_P016:
                return PLANE_CTL_FORMAT_P016;
        /* Packed high bit-depth YUV */
        case DRM_FORMAT_Y210:
                return PLANE_CTL_FORMAT_Y210;
        case DRM_FORMAT_Y212:
                return PLANE_CTL_FORMAT_Y212;
        case DRM_FORMAT_Y216:
                return PLANE_CTL_FORMAT_Y216;
        case DRM_FORMAT_Y410:
                return PLANE_CTL_FORMAT_Y410;
        case DRM_FORMAT_Y412:
                return PLANE_CTL_FORMAT_Y412;
        case DRM_FORMAT_Y416:
                return PLANE_CTL_FORMAT_Y416;
        default:
                MISSING_CASE(pixel_format);
        }

        return 0;
}
3645
3646 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3647 {
3648         if (!plane_state->base.fb->format->has_alpha)
3649                 return PLANE_CTL_ALPHA_DISABLE;
3650
3651         switch (plane_state->base.pixel_blend_mode) {
3652         case DRM_MODE_BLEND_PIXEL_NONE:
3653                 return PLANE_CTL_ALPHA_DISABLE;
3654         case DRM_MODE_BLEND_PREMULTI:
3655                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3656         case DRM_MODE_BLEND_COVERAGE:
3657                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3658         default:
3659                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3660                 return PLANE_CTL_ALPHA_DISABLE;
3661         }
3662 }
3663
3664 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3665 {
3666         if (!plane_state->base.fb->format->has_alpha)
3667                 return PLANE_COLOR_ALPHA_DISABLE;
3668
3669         switch (plane_state->base.pixel_blend_mode) {
3670         case DRM_MODE_BLEND_PIXEL_NONE:
3671                 return PLANE_COLOR_ALPHA_DISABLE;
3672         case DRM_MODE_BLEND_PREMULTI:
3673                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3674         case DRM_MODE_BLEND_COVERAGE:
3675                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3676         default:
3677                 MISSING_CASE(plane_state->base.pixel_blend_mode);
3678                 return PLANE_COLOR_ALPHA_DISABLE;
3679         }
3680 }
3681
3682 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
3683 {
3684         switch (fb_modifier) {
3685         case DRM_FORMAT_MOD_LINEAR:
3686                 break;
3687         case I915_FORMAT_MOD_X_TILED:
3688                 return PLANE_CTL_TILED_X;
3689         case I915_FORMAT_MOD_Y_TILED:
3690                 return PLANE_CTL_TILED_Y;
3691         case I915_FORMAT_MOD_Y_TILED_CCS:
3692                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3693         case I915_FORMAT_MOD_Yf_TILED:
3694                 return PLANE_CTL_TILED_YF;
3695         case I915_FORMAT_MOD_Yf_TILED_CCS:
3696                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3697         default:
3698                 MISSING_CASE(fb_modifier);
3699         }
3700
3701         return 0;
3702 }
3703
3704 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3705 {
3706         switch (rotate) {
3707         case DRM_MODE_ROTATE_0:
3708                 break;
3709         /*
3710          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
3711          * while i915 HW rotation is clockwise, thats why this swapping.
3712          */
3713         case DRM_MODE_ROTATE_90:
3714                 return PLANE_CTL_ROTATE_270;
3715         case DRM_MODE_ROTATE_180:
3716                 return PLANE_CTL_ROTATE_180;
3717         case DRM_MODE_ROTATE_270:
3718                 return PLANE_CTL_ROTATE_90;
3719         default:
3720                 MISSING_CASE(rotate);
3721         }
3722
3723         return 0;
3724 }
3725
3726 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3727 {
3728         switch (reflect) {
3729         case 0:
3730                 break;
3731         case DRM_MODE_REFLECT_X:
3732                 return PLANE_CTL_FLIP_HORIZONTAL;
3733         case DRM_MODE_REFLECT_Y:
3734         default:
3735                 MISSING_CASE(reflect);
3736         }
3737
3738         return 0;
3739 }
3740
3741 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3742 {
3743         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3744         u32 plane_ctl = 0;
3745
3746         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3747                 return plane_ctl;
3748
3749         plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3750         plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3751
3752         return plane_ctl;
3753 }
3754
/*
 * Build the PLANE_CTL value for a SKL+ universal plane: enable bit,
 * pixel format, tiling, rotation/reflection and color keying. On
 * pre-GLK hardware the alpha blend mode, plane gamma disable and
 * YUV->RGB conversion selection also live in this register; GLK and
 * gen10+ carry those in PLANE_COLOR_CTL instead (see
 * glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
                  const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 plane_ctl;

        plane_ctl = PLANE_CTL_ENABLE;

        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
                plane_ctl |= skl_plane_ctl_alpha(plane_state);
                plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
        }

        plane_ctl |= skl_plane_ctl_format(fb->format->format);
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
        plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

        /* Horizontal flip support only exists on gen10+. */
        if (INTEL_GEN(dev_priv) >= 10)
                plane_ctl |= cnl_plane_ctl_flip(rotation &
                                                DRM_MODE_REFLECT_MASK);

        if (key->flags & I915_SET_COLORKEY_DESTINATION)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
        else if (key->flags & I915_SET_COLORKEY_SOURCE)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

        return plane_ctl;
}
3793
3794 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3795 {
3796         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3797         u32 plane_color_ctl = 0;
3798
3799         if (INTEL_GEN(dev_priv) >= 11)
3800                 return plane_color_ctl;
3801
3802         plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3803         plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3804
3805         return plane_color_ctl;
3806 }
3807
/*
 * Build the PLANE_COLOR_CTL value for a GLK+ plane: plane gamma
 * disable, the alpha blend mode and, for YUV framebuffers, the
 * YUV->RGB conversion setup.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                        const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        u32 plane_color_ctl = 0;

        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
        plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

        if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
                /* Non-HDR planes pick one of the fixed YUV->RGB CSC modes. */
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
        } else if (fb->format->is_yuv) {
                /* ICL HDR planes enable the explicitly programmed input CSC. */
                plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
        }

        return plane_color_ctl;
}
3834
/*
 * Restore the modeset configuration after a GPU reset / resume:
 * re-read the hardware state, re-enable VGA, and — if a duplicated
 * atomic state was saved beforehand — commit it with mode_changed
 * forced so the full configuration is recomputed.
 *
 * Returns 0 on success or a negative error code from the atomic
 * commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
                       struct drm_atomic_state *state,
                       struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        intel_modeset_setup_hw_state(dev, ctx);
        i915_redisable_vga(to_i915(dev));

        /* No saved state to restore: the HW readout above is enough. */
        if (!state)
                return 0;

        /*
         * We've duplicated the state, pointers to the old state are invalid.
         *
         * Don't attempt to use the old state until we commit the duplicated state.
         */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /*
                 * Force recalculation even if we restore
                 * current state. With fast modeset this may not result
                 * in a modeset when the state is compatible.
                 */
                crtc_state->mode_changed = true;
        }

        /* ignore any reset values/BIOS leftovers in the WM registers */
        if (!HAS_GMCH(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

        WARN_ON(ret == -EDEADLK);
        return ret;
}
3873
3874 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3875 {
3876         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3877                 intel_has_gpu_reset(dev_priv));
3878 }
3879
/*
 * Prepare the display for a GPU reset: grab all modeset locks, stash a
 * duplicate of the current atomic state in modeset_restore_state and
 * disable all crtcs so the reset cannot race with an ongoing modeset.
 *
 * The mutex/ctx taken here are dropped by intel_finish_reset(); the
 * I915_RESET_MODESET flag marks that the prepare/finish pair is active.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until no deadlock backoff is needed. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		/*
		 * NOTE(review): returns with the mutex/ctx still held;
		 * intel_finish_reset() drops them since I915_RESET_MODESET
		 * is set but modeset_restore_state stays NULL.
		 */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		/* Same as above: locks are released in intel_finish_reset(). */
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3935
/*
 * Counterpart of intel_prepare_reset(): restore (or fully re-init) the
 * display after a GPU reset, then drop the modeset locks/ctx taken in
 * prepare and clear I915_RESET_MODESET.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Take ownership of the stashed state; NULL if prepare bailed early. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts under the irq lock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3989
3990 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
3991 {
3992         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3993         enum pipe pipe = crtc->pipe;
3994         u32 tmp;
3995
3996         tmp = I915_READ(PIPE_CHICKEN(pipe));
3997
3998         /*
3999          * Display WA #1153: icl
4000          * enable hardware to bypass the alpha math
4001          * and rounding for per-pixel values 00 and 0xff
4002          */
4003         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4004
4005         /*
4006          * W/A for underruns with linear/X-tiled with
4007          * WM1+ disabled.
4008          */
4009         tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
4010
4011         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4012 }
4013
/*
 * Update the pipe-level state that can change without a full modeset:
 * pipe source size, panel fitter, scalers, pipe bottom color and (icl+)
 * the PIPE_CHICKEN workaround bits.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC holds (width - 1) << 16 | (height - 1). */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Enable, or disable a previously-enabled, PCH panel fitter. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	/*
	 * We don't (yet) allow userspace to control the pipe background color,
	 * so force it to black, but apply pipe gamma and CSC so that its
	 * handling will match how we program our planes.
	 */
	if (INTEL_GEN(dev_priv) >= 9)
		I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
			   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
			   SKL_BOTTOM_COLOR_CSC_ENABLE);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4062
/*
 * Switch the FDI link out of the training patterns into normal
 * operation on both the CPU TX and PCH RX side, then wait one idle
 * pattern time before enabling error correction on IVB.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		/*
		 * NOTE(review): clearing then immediately setting
		 * FDI_LINK_TRAIN_NONE looks like a no-op pair kept for
		 * symmetry with the branches above - confirm intent.
		 */
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4103
4104 /* The FDI link training functions for ILK/Ibexpeak. */
4105 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4106                                     const struct intel_crtc_state *crtc_state)
4107 {
4108         struct drm_device *dev = crtc->base.dev;
4109         struct drm_i915_private *dev_priv = to_i915(dev);
4110         int pipe = crtc->pipe;
4111         i915_reg_t reg;
4112         u32 temp, tries;
4113
4114         /* FDI needs bits from pipe first */
4115         assert_pipe_enabled(dev_priv, pipe);
4116
4117         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4118            for train result */
4119         reg = FDI_RX_IMR(pipe);
4120         temp = I915_READ(reg);
4121         temp &= ~FDI_RX_SYMBOL_LOCK;
4122         temp &= ~FDI_RX_BIT_LOCK;
4123         I915_WRITE(reg, temp);
4124         I915_READ(reg);
4125         udelay(150);
4126
4127         /* enable CPU FDI TX and PCH FDI RX */
4128         reg = FDI_TX_CTL(pipe);
4129         temp = I915_READ(reg);
4130         temp &= ~FDI_DP_PORT_WIDTH_MASK;
4131         temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4132         temp &= ~FDI_LINK_TRAIN_NONE;
4133         temp |= FDI_LINK_TRAIN_PATTERN_1;
4134         I915_WRITE(reg, temp | FDI_TX_ENABLE);
4135
4136         reg = FDI_RX_CTL(pipe);
4137         temp = I915_READ(reg);
4138         temp &= ~FDI_LINK_TRAIN_NONE;
4139         temp |= FDI_LINK_TRAIN_PATTERN_1;
4140         I915_WRITE(reg, temp | FDI_RX_ENABLE);
4141
4142         POSTING_READ(reg);
4143         udelay(150);
4144
4145         /* Ironlake workaround, enable clock pointer after FDI enable*/
4146         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4147         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4148                    FDI_RX_PHASE_SYNC_POINTER_EN);
4149
4150         reg = FDI_RX_IIR(pipe);
4151         for (tries = 0; tries < 5; tries++) {
4152                 temp = I915_READ(reg);
4153                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4154
4155                 if ((temp & FDI_RX_BIT_LOCK)) {
4156                         DRM_DEBUG_KMS("FDI train 1 done.\n");
4157                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4158                         break;
4159                 }
4160         }
4161         if (tries == 5)
4162                 DRM_ERROR("FDI train 1 fail!\n");
4163
4164         /* Train 2 */
4165         reg = FDI_TX_CTL(pipe);
4166         temp = I915_READ(reg);
4167         temp &= ~FDI_LINK_TRAIN_NONE;
4168         temp |= FDI_LINK_TRAIN_PATTERN_2;
4169         I915_WRITE(reg, temp);
4170
4171         reg = FDI_RX_CTL(pipe);
4172         temp = I915_READ(reg);
4173         temp &= ~FDI_LINK_TRAIN_NONE;
4174         temp |= FDI_LINK_TRAIN_PATTERN_2;
4175         I915_WRITE(reg, temp);
4176
4177         POSTING_READ(reg);
4178         udelay(150);
4179
4180         reg = FDI_RX_IIR(pipe);
4181         for (tries = 0; tries < 5; tries++) {
4182                 temp = I915_READ(reg);
4183                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4184
4185                 if (temp & FDI_RX_SYMBOL_LOCK) {
4186                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4187                         DRM_DEBUG_KMS("FDI train 2 done.\n");
4188                         break;
4189                 }
4190         }
4191         if (tries == 5)
4192                 DRM_ERROR("FDI train 2 fail!\n");
4193
4194         DRM_DEBUG_KMS("FDI train done\n");
4195
4196 }
4197
/*
 * FDI voltage-swing / pre-emphasis levels tried in this order by the
 * SNB and IVB link-training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4204
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-pattern sequence as ILK, but each pattern is retried across
 * the four vswing/pre-emphasis levels in snb_b_fdi_train_param[], with
 * up to 5 lock polls per level.  Failures are logged and the sequence
 * continues.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Ack the status bit by writing it back. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing walk as above, this time polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4337
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j counts each level twice, hence j/2 indexes the table. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; double-read to catch a late status bit. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

	/*
	 * NOTE(review): reached via goto on success, but also by falling
	 * through when every vswing level failed - the "done" message is
	 * printed either way.
	 */
train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4457
/*
 * Enable the FDI PLLs: first the PCH RX PLL (with lane count and BPC
 * programmed), switch the RX side from Rawclk to PCDclk, then make
 * sure the CPU TX PLL is on.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's PIPECONF BPC field into FDI RX (bits 16-18). */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4494
/*
 * Disable the FDI PLLs in the reverse order of ironlake_fdi_pll_enable():
 * RX back to Rawclk, then the CPU TX PLL, then the PCH RX PLL, with a
 * settling delay after each PLL disable.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4524
/*
 * Disable the FDI link on both ends (TX and RX stay configured but
 * disabled), re-arm training pattern 1 so the next enable can retrain,
 * and keep the RX BPC field in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4577
4578 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4579 {
4580         struct drm_crtc *crtc;
4581         bool cleanup_done;
4582
4583         drm_for_each_crtc(crtc, &dev_priv->drm) {
4584                 struct drm_crtc_commit *commit;
4585                 spin_lock(&crtc->commit_lock);
4586                 commit = list_first_entry_or_null(&crtc->commit_list,
4587                                                   struct drm_crtc_commit, commit_entry);
4588                 cleanup_done = commit ?
4589                         try_wait_for_completion(&commit->cleanup_done) : true;
4590                 spin_unlock(&crtc->commit_lock);
4591
4592                 if (cleanup_done)
4593                         continue;
4594
4595                 drm_crtc_wait_one_vblank(crtc);
4596
4597                 return true;
4598         }
4599
4600         return false;
4601 }
4602
4603 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4604 {
4605         u32 temp;
4606
4607         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4608
4609         mutex_lock(&dev_priv->sb_lock);
4610
4611         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4612         temp |= SBI_SSCCTL_DISABLE;
4613         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4614
4615         mutex_unlock(&dev_priv->sb_lock);
4616 }
4617
4618 /* Program iCLKIP clock to the desired frequency */
4619 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4620 {
4621         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4622         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4623         int clock = crtc_state->base.adjusted_mode.crtc_clock;
4624         u32 divsel, phaseinc, auxdiv, phasedir = 0;
4625         u32 temp;
4626
4627         lpt_disable_iclkip(dev_priv);
4628
4629         /* The iCLK virtual clock root frequency is in MHz,
 * but the adjusted_mode->crtc_clock is in KHz. To get the
4631          * divisors, it is necessary to divide one by another, so we
4632          * convert the virtual clock precision to KHz here for higher
4633          * precision.
4634          */
4635         for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4636                 u32 iclk_virtual_root_freq = 172800 * 1000;
4637                 u32 iclk_pi_range = 64;
4638                 u32 desired_divisor;
4639
4640                 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4641                                                     clock << auxdiv);
4642                 divsel = (desired_divisor / iclk_pi_range) - 2;
4643                 phaseinc = desired_divisor % iclk_pi_range;
4644
4645                 /*
4646                  * Near 20MHz is a corner case which is
4647                  * out of range for the 7-bit divisor
4648                  */
4649                 if (divsel <= 0x7f)
4650                         break;
4651         }
4652
4653         /* This should not happen with any sane values */
4654         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4655                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4656         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4657                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4658
4659         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4660                         clock,
4661                         auxdiv,
4662                         divsel,
4663                         phasedir,
4664                         phaseinc);
4665
4666         mutex_lock(&dev_priv->sb_lock);
4667
4668         /* Program SSCDIVINTPHASE6 */
4669         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4670         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4671         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4672         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4673         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4674         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4675         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4676         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4677
4678         /* Program SSCAUXDIV */
4679         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4680         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4681         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4682         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4683
4684         /* Enable modulator and associated divider */
4685         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4686         temp &= ~SBI_SSCCTL_DISABLE;
4687         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4688
4689         mutex_unlock(&dev_priv->sb_lock);
4690
4691         /* Wait for initialization time */
4692         udelay(24);
4693
4694         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4695 }
4696
/*
 * Read back the currently programmed iCLKIP frequency in kHz.
 * Returns 0 if the pixel clock is gated or the SSC modulator is
 * disabled. The divisor math is the inverse of lpt_program_iclkip().
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband (SBI) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the integer divisor and phase increment. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4733
/*
 * Copy the CPU transcoder's H/V total, blank, sync and vsyncshift timing
 * registers into the corresponding PCH transcoder registers.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4757
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches the requested state.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* Warn if FDI RX on pipe B or C is still enabled while flipping. */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	/* Posting read to make sure the write has landed. */
	POSTING_READ(SOUTH_CHICKEN1);
}
4777
4778 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4779 {
4780         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4781         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4782
4783         switch (crtc->pipe) {
4784         case PIPE_A:
4785                 break;
4786         case PIPE_B:
4787                 if (crtc_state->fdi_lanes > 2)
4788                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
4789                 else
4790                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
4791
4792                 break;
4793         case PIPE_C:
4794                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4795
4796                 break;
4797         default:
4798                 BUG();
4799         }
4800 }
4801
/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	int num_encoders = 0;
	int i;

	/* Scan the new connector states for ones driven by this crtc. */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	/* The caller guarantees exactly one encoder; anything else is a bug. */
	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
	     num_encoders, pipe_name(crtc->pipe));

	return encoder;
}
4830
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB shares FDI B/C lanes; configure the split before training. */
	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* Route DPLL A or B to this pipe's PCH transcoder. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Propagate the sync polarities from the adjusted mode. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
4922
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * transcoder timings and enable the PCH transcoder. Only PIPE_A's PCH
 * transcoder is used here.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4939
/*
 * Sanity check after a CPT mode set: sample the pipe's scanline register
 * (PIPEDSL) and verify it advances, i.e. the pipe is actually running.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4953
4954 /*
4955  * The hardware phase 0.0 refers to the center of the pixel.
4956  * We want to start from the top/left edge which is phase
4957  * -0.5. That matches how the hardware calculates the scaling
4958  * factors (from top-left of the first pixel to bottom-right
4959  * of the last pixel, as opposed to the pixel centers).
4960  *
4961  * For 4:2:0 subsampled chroma planes we obviously have to
4962  * adjust that so that the chroma sample position lands in
4963  * the right spot.
4964  *
4965  * Note that for packed YCbCr 4:2:2 formats there is no way to
4966  * control chroma siting. The hardware simply replicates the
4967  * chroma samples for both of the luma samples, and thus we don't
4968  * actually get the expected MPEG2 chroma siting convention :(
4969  * The same behaviour is observed on pre-SKL platforms as well.
4970  *
4971  * Theory behind the formula (note that we ignore sub-pixel
4972  * source coordinates):
4973  * s = source sample position
4974  * d = destination sample position
4975  *
4976  * Downscaling 4:1:
4977  * -0.5
4978  * | 0.0
4979  * | |     1.5 (initial phase)
4980  * | |     |
4981  * v v     v
4982  * | s | s | s | s |
4983  * |       d       |
4984  *
4985  * Upscaling 1:4:
4986  * -0.5
4987  * | -0.375 (initial phase)
4988  * | |     0.0
4989  * | |     |
4990  * v v     v
4991  * |       s       |
4992  * | d | d | d | d |
4993  */
4994 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
4995 {
4996         int phase = -0x8000;
4997         u16 trip = 0;
4998
4999         if (chroma_cosited)
5000                 phase += (sub - 1) * 0x8000 / sub;
5001
5002         phase += scale / (2 * sub);
5003
5004         /*
5005          * Hardware initial phase limited to [-0.5:1.5].
5006          * Since the max hardware scale factor is 3.0, we
5007          * should never actually excdeed 1.0 here.
5008          */
5009         WARN_ON(phase < -0x8000 || phase > 0x18000);
5010
5011         if (phase < 0)
5012                 phase = 0x10000 + phase;
5013         else
5014                 trip = PS_PHASE_TRIP;
5015
5016         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5017 }
5018
/*
 * skl_update_scaler - stage a scaler request or release in crtc_state.
 * @crtc_state: crtc state that tracks scaler usage
 * @force_detach: free the scaler regardless of whether it is needed
 * @scaler_user: index identifying the requesting plane/crtc user
 * @scaler_id: in/out scaler id currently assigned to this user
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @format: framebuffer format, or NULL for pipe (crtc) scaling
 * @need_scaler: caller has already determined a scaler is required
 *
 * Only the bookkeeping in crtc_state is updated here; the actual scaler
 * registers are programmed later during plane/panel-fitter programming.
 * Returns 0 on success, -EINVAL if the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have their own minimum dimensions. */
	if (format && is_planar_yuv_format(format->format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (IS_GEN(dev_priv, 11) &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (!IS_GEN(dev_priv, 11) &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5107
5108 /**
5109  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5110  *
5111  * @state: crtc's scaler state
5112  *
5113  * Return
5114  *     0 - scaler_usage updated successfully
5115  *    error - requested scaling cannot be supported or other error condition
5116  */
5117 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5118 {
5119         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5120         bool need_scaler = false;
5121
5122         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5123                 need_scaler = true;
5124
5125         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5126                                  &state->scaler_state.scaler_id,
5127                                  state->pipe_src_w, state->pipe_src_h,
5128                                  adjusted_mode->crtc_hdisplay,
5129                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5130 }
5131
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	/* Detach the scaler if the plane has no fb or is invisible. */
	bool force_detach = !fb || !plane_state->base.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && is_planar_yuv_format(fb->format->format))
		need_scaler = true;

	/* Src coordinates are .16 fixed point; convert to integer pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst),
				fb ? fb->format : NULL, need_scaler);

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format: only these formats can be scaled. */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_Y410:
	case DRM_FORMAT_Y412:
	case DRM_FORMAT_Y416:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
5210
5211 static void skylake_scaler_disable(struct intel_crtc *crtc)
5212 {
5213         int i;
5214
5215         for (i = 0; i < crtc->num_scalers; i++)
5216                 skl_detach_scaler(crtc, i);
5217 }
5218
/*
 * Enable panel fitting on SKL+ via a pipe scaler: program the scaler
 * control, phases, window position and size from crtc_state.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been reserved at atomic check time. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16, height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* Scale factors in .16 fixed point. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5255
/*
 * Enable the fixed-function panel fitter (PF) for this pipe on
 * ILK-class hardware, programming filter, window position and size.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5276
/*
 * Enable IPS if crtc_state requests it. Broadwell toggles IPS through
 * the pcode mailbox; other platforms write IPS_CTL directly and wait
 * for the enable bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5316
/*
 * Disable IPS if crtc_state has it enabled. Mirrors hsw_enable_ips():
 * pcode mailbox on Broadwell, direct IPS_CTL write elsewhere, followed
 * by a vblank wait before planes may be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5347
5348 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5349 {
5350         if (intel_crtc->overlay) {
5351                 struct drm_device *dev = intel_crtc->base.dev;
5352
5353                 mutex_lock(&dev->struct_mutex);
5354                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5355                 mutex_unlock(&dev->struct_mutex);
5356         }
5357
5358         /* Let userspace switch the overlay on again. In most cases userspace
5359          * has to recompute where to put it anyway.
5360          */
5361 }
5362
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5397
/*
 * Prepare for turning off the primary plane outside of a full atomic commit:
 * suppress gen2 underrun noise, shut down IPS and kick memory self-refresh
 * off so the plane disable actually latches at the next vblank.
 *
 * FIXME get rid of this and use pre_plane_update
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* IPS must be off before the primary plane goes away. */
	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5429
5430 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5431                                        const struct intel_crtc_state *new_crtc_state)
5432 {
5433         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5434         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5435
5436         if (!old_crtc_state->ips_enabled)
5437                 return false;
5438
5439         if (needs_modeset(&new_crtc_state->base))
5440                 return true;
5441
5442         /*
5443          * Workaround : Do not read or write the pipe palette/gamma data while
5444          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5445          *
5446          * Disable IPS before we program the LUT.
5447          */
5448         if (IS_HASWELL(dev_priv) &&
5449             (new_crtc_state->base.color_mgmt_changed ||
5450              new_crtc_state->update_pipe) &&
5451             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5452                 return true;
5453
5454         return !new_crtc_state->ips_enabled;
5455 }
5456
5457 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5458                                        const struct intel_crtc_state *new_crtc_state)
5459 {
5460         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5461         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5462
5463         if (!new_crtc_state->ips_enabled)
5464                 return false;
5465
5466         if (needs_modeset(&new_crtc_state->base))
5467                 return true;
5468
5469         /*
5470          * Workaround : Do not read or write the pipe palette/gamma data while
5471          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5472          *
5473          * Re-enable IPS after the LUT has been programmed.
5474          */
5475         if (IS_HASWELL(dev_priv) &&
5476             (new_crtc_state->base.color_mgmt_changed ||
5477              new_crtc_state->update_pipe) &&
5478             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5479                 return true;
5480
5481         /*
5482          * We can't read out IPS on broadwell, assume the worst and
5483          * forcibly enable IPS on the first fastset.
5484          */
5485         if (new_crtc_state->update_pipe &&
5486             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5487                 return true;
5488
5489         return !old_crtc_state->ips_enabled;
5490 }
5491
5492 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5493                           const struct intel_crtc_state *crtc_state)
5494 {
5495         if (!crtc_state->nv12_planes)
5496                 return false;
5497
5498         /* WA Display #0827: Gen9:all */
5499         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5500                 return true;
5501
5502         return false;
5503 }
5504
/*
 * Post-commit work for one CRTC: frontbuffer flip notification, optimal
 * watermarks, IPS re-enable, FBC post-update, primary-plane re-enable
 * bookkeeping, and tearing down the WA 827 clock-gating workaround once
 * it is no longer needed.  Runs after the planes have been updated.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* New (post-commit) state for this CRTC. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	/* Program the optimal (post-vblank) watermarks. */
	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/* Primary went from hidden to visible (or full modeset). */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827: deactivate once NV12 planes are gone. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, false);
	}
}
5544
/*
 * Pre-commit work for one CRTC, run before the planes are written: IPS
 * disable, FBC pre-update, gen2 underrun suppression, WA 827 activation,
 * self-refresh/LP-watermark workarounds, and programming of the
 * intermediate watermarks.  Ordering here is deliberate; do not reorder.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827: activate before the first NV12 plane goes live. */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5634
5635 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5636                                       struct intel_crtc *crtc)
5637 {
5638         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5639         const struct intel_crtc_state *new_crtc_state =
5640                 intel_atomic_get_new_crtc_state(state, crtc);
5641         unsigned int update_mask = new_crtc_state->update_planes;
5642         const struct intel_plane_state *old_plane_state;
5643         struct intel_plane *plane;
5644         unsigned fb_bits = 0;
5645         int i;
5646
5647         intel_crtc_dpms_overlay_disable(crtc);
5648
5649         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5650                 if (crtc->pipe != plane->pipe ||
5651                     !(update_mask & BIT(plane->id)))
5652                         continue;
5653
5654                 plane->disable_plane(plane, new_crtc_state);
5655
5656                 if (old_plane_state->base.visible)
5657                         fb_bits |= plane->frontbuffer_bit;
5658         }
5659
5660         intel_frontbuffer_flip(dev_priv, fb_bits);
5661 }
5662
5663 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5664                                           struct intel_crtc_state *crtc_state,
5665                                           struct drm_atomic_state *old_state)
5666 {
5667         struct drm_connector_state *conn_state;
5668         struct drm_connector *conn;
5669         int i;
5670
5671         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5672                 struct intel_encoder *encoder =
5673                         to_intel_encoder(conn_state->best_encoder);
5674
5675                 if (conn_state->crtc != crtc)
5676                         continue;
5677
5678                 if (encoder->pre_pll_enable)
5679                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5680         }
5681 }
5682
5683 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5684                                       struct intel_crtc_state *crtc_state,
5685                                       struct drm_atomic_state *old_state)
5686 {
5687         struct drm_connector_state *conn_state;
5688         struct drm_connector *conn;
5689         int i;
5690
5691         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5692                 struct intel_encoder *encoder =
5693                         to_intel_encoder(conn_state->best_encoder);
5694
5695                 if (conn_state->crtc != crtc)
5696                         continue;
5697
5698                 if (encoder->pre_enable)
5699                         encoder->pre_enable(encoder, crtc_state, conn_state);
5700         }
5701 }
5702
5703 static void intel_encoders_enable(struct drm_crtc *crtc,
5704                                   struct intel_crtc_state *crtc_state,
5705                                   struct drm_atomic_state *old_state)
5706 {
5707         struct drm_connector_state *conn_state;
5708         struct drm_connector *conn;
5709         int i;
5710
5711         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5712                 struct intel_encoder *encoder =
5713                         to_intel_encoder(conn_state->best_encoder);
5714
5715                 if (conn_state->crtc != crtc)
5716                         continue;
5717
5718                 if (encoder->enable)
5719                         encoder->enable(encoder, crtc_state, conn_state);
5720                 intel_opregion_notify_encoder(encoder, true);
5721         }
5722 }
5723
5724 static void intel_encoders_disable(struct drm_crtc *crtc,
5725                                    struct intel_crtc_state *old_crtc_state,
5726                                    struct drm_atomic_state *old_state)
5727 {
5728         struct drm_connector_state *old_conn_state;
5729         struct drm_connector *conn;
5730         int i;
5731
5732         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5733                 struct intel_encoder *encoder =
5734                         to_intel_encoder(old_conn_state->best_encoder);
5735
5736                 if (old_conn_state->crtc != crtc)
5737                         continue;
5738
5739                 intel_opregion_notify_encoder(encoder, false);
5740                 if (encoder->disable)
5741                         encoder->disable(encoder, old_crtc_state, old_conn_state);
5742         }
5743 }
5744
5745 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5746                                         struct intel_crtc_state *old_crtc_state,
5747                                         struct drm_atomic_state *old_state)
5748 {
5749         struct drm_connector_state *old_conn_state;
5750         struct drm_connector *conn;
5751         int i;
5752
5753         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5754                 struct intel_encoder *encoder =
5755                         to_intel_encoder(old_conn_state->best_encoder);
5756
5757                 if (old_conn_state->crtc != crtc)
5758                         continue;
5759
5760                 if (encoder->post_disable)
5761                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5762         }
5763 }
5764
5765 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5766                                             struct intel_crtc_state *old_crtc_state,
5767                                             struct drm_atomic_state *old_state)
5768 {
5769         struct drm_connector_state *old_conn_state;
5770         struct drm_connector *conn;
5771         int i;
5772
5773         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5774                 struct intel_encoder *encoder =
5775                         to_intel_encoder(old_conn_state->best_encoder);
5776
5777                 if (old_conn_state->crtc != crtc)
5778                         continue;
5779
5780                 if (encoder->post_pll_disable)
5781                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5782         }
5783 }
5784
5785 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5786                                        struct intel_crtc_state *crtc_state,
5787                                        struct drm_atomic_state *old_state)
5788 {
5789         struct drm_connector_state *conn_state;
5790         struct drm_connector *conn;
5791         int i;
5792
5793         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5794                 struct intel_encoder *encoder =
5795                         to_intel_encoder(conn_state->best_encoder);
5796
5797                 if (conn_state->crtc != crtc)
5798                         continue;
5799
5800                 if (encoder->update_pipe)
5801                         encoder->update_pipe(encoder, crtc_state, conn_state);
5802         }
5803 }
5804
/*
 * Full modeset enable sequence for ILK-style platforms (CPU pipe plus
 * optional PCH transcoder/FDI).  The ordering of the steps below follows
 * the hardware enable sequence and must not be changed casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (pipe_config->has_pch_encoder)
		intel_prepare_shared_dpll(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* FDI link M/N values, only meaningful with a PCH encoder. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (pipe_config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(pipe_config);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
	intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (pipe_config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm underrun reporting now that the pipe is stable. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5899
5900 /* IPS only exists on ULT machines and is tied to pipe A. */
5901 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5902 {
5903         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5904 }
5905
5906 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5907                                             enum pipe pipe, bool apply)
5908 {
5909         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5910         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5911
5912         if (apply)
5913                 val |= mask;
5914         else
5915                 val &= ~mask;
5916
5917         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5918 }
5919
5920 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5921 {
5922         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5923         enum pipe pipe = crtc->pipe;
5924         u32 val;
5925
5926         val = MBUS_DBOX_A_CREDIT(2);
5927         val |= MBUS_DBOX_BW_CREDIT(1);
5928         val |= MBUS_DBOX_B_CREDIT(8);
5929
5930         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5931 }
5932
/*
 * Full modeset enable sequence for HSW+ (DDI) platforms, including the
 * gen9+ scaler path, ICL MBus/pipe-chicken programming and the GLK/CNL
 * scaler clock-gating workaround.  Step ordering mirrors the hardware
 * enable sequence; do not reorder casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;

	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	/* FDI link M/N values, only meaningful with a PCH encoder. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(intel_crtc);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* WA #1180: keep the clock-gating disabled until one full frame. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6040
6041 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6042 {
6043         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6044         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6045         enum pipe pipe = crtc->pipe;
6046
6047         /* To avoid upsetting the power well on haswell only disable the pfit if
6048          * it's in use. The hw state code will make sure we get this right. */
6049         if (old_crtc_state->pch_pfit.enabled) {
6050                 I915_WRITE(PF_CTL(pipe), 0);
6051                 I915_WRITE(PF_WIN_POS(pipe), 0);
6052                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6053         }
6054 }
6055
/*
 * Full modeset disable sequence for ILK-style platforms: encoders, pipe,
 * pfit, FDI and (when present) the PCH transcoder, in that order.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT needs some extra PCH cleanup beyond the transcoder. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm underrun reporting now that everything is quiesced. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6114
/*
 * Full modeset disable sequence for HSW+ (DDI) platforms: encoders, pipe,
 * MST payload, transcoder, DSC, scalers/pfit, then the post-disable and
 * post-PLL-disable encoder hooks.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
6149
/*
 * Program and enable the GMCH panel fitter for this crtc, if the computed
 * state requests one.  Ratios must be written before the control register.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* No fitting requested for this mode. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6172
6173 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6174 {
6175         if (port == PORT_NONE)
6176                 return false;
6177
6178         if (IS_ICELAKE(dev_priv))
6179                 return port <= PORT_B;
6180
6181         return false;
6182 }
6183
6184 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6185 {
6186         if (IS_ICELAKE(dev_priv))
6187                 return port >= PORT_C && port <= PORT_F;
6188
6189         return false;
6190 }
6191
6192 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6193 {
6194         if (!intel_port_is_tc(dev_priv, port))
6195                 return PORT_TC_NONE;
6196
6197         return port - PORT_C;
6198 }
6199
/*
 * Map a DDI port to the power domain covering its lanes.  Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6220
/*
 * Map a digital port's AUX channel to its AUX power domain.  Unknown
 * channels are flagged via MISSING_CASE and fall back to AUX A.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6242
6243 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6244                                   struct intel_crtc_state *crtc_state)
6245 {
6246         struct drm_device *dev = crtc->dev;
6247         struct drm_i915_private *dev_priv = to_i915(dev);
6248         struct drm_encoder *encoder;
6249         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6250         enum pipe pipe = intel_crtc->pipe;
6251         u64 mask;
6252         enum transcoder transcoder = crtc_state->cpu_transcoder;
6253
6254         if (!crtc_state->base.active)
6255                 return 0;
6256
6257         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6258         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6259         if (crtc_state->pch_pfit.enabled ||
6260             crtc_state->pch_pfit.force_thru)
6261                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6262
6263         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6264                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6265
6266                 mask |= BIT_ULL(intel_encoder->power_domain);
6267         }
6268
6269         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6270                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6271
6272         if (crtc_state->shared_dpll)
6273                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6274
6275         return mask;
6276 }
6277
6278 static u64
6279 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6280                                struct intel_crtc_state *crtc_state)
6281 {
6282         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6283         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6284         enum intel_display_power_domain domain;
6285         u64 domains, new_domains, old_domains;
6286
6287         old_domains = intel_crtc->enabled_power_domains;
6288         intel_crtc->enabled_power_domains = new_domains =
6289                 get_crtc_power_domains(crtc, crtc_state);
6290
6291         domains = new_domains & ~old_domains;
6292
6293         for_each_power_domain(domain, domains)
6294                 intel_display_power_get(dev_priv, domain);
6295
6296         return old_domains & ~new_domains;
6297 }
6298
/* Drop a reference on every power domain set in the @domains bitmask. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
6307
/*
 * Modeset enable sequence for VLV/CHV pipes: link M/N, pipe timings and
 * source size, pipeconf, PLL, panel fitter, color LUTs, watermarks and
 * finally the pipe, with encoder hooks interleaved where the hw requires.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B only: select legacy blending, black canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	/* CHV and VLV have distinct PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	/* Program watermarks before the pipe starts fetching data. */
	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6365
6366 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6367 {
6368         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6369         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6370
6371         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6372         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6373 }
6374
/*
 * Modeset enable sequence for gen2-4 (pre-ILK, non-VLV/CHV) pipes.
 * Mirrors valleyview_crtc_enable() but programs the PLL dividers up front
 * and uses the i9xx PLL enable path.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	/* Program watermarks before the pipe starts fetching data. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6425
6426 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6427 {
6428         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6429         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6430
6431         if (!old_crtc_state->gmch_pfit.control)
6432                 return;
6433
6434         assert_pipe_disabled(dev_priv, crtc->pipe);
6435
6436         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6437                       I915_READ(PFIT_CONTROL));
6438         I915_WRITE(PFIT_CONTROL, 0);
6439 }
6440
/*
 * Disable sequence for gen2-4 and VLV/CHV pipes: encoders, vblank, pipe,
 * panel fitter, post-disable hooks, then the PLL (unless a DSI output
 * still owns it), finishing with underrun reporting and watermarks.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI keeps the PLL running; otherwise shut it down per platform. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6489
/*
 * Force a crtc off outside of a full atomic commit (used during hw state
 * takeover/sanitization).  Disables all planes, runs the platform crtc
 * disable hook with a throwaway atomic state, then scrubs the drm/i915
 * software state and drops the crtc's power-domain references.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Planes must go down before the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Scrub the software state to match the now-disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release every power domain this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6559
6560 /*
6561  * turn all crtc's off, but do not adjust state
6562  * This has to be paired with a call to intel_modeset_setup_hw_state.
6563  */
6564 int intel_display_suspend(struct drm_device *dev)
6565 {
6566         struct drm_i915_private *dev_priv = to_i915(dev);
6567         struct drm_atomic_state *state;
6568         int ret;
6569
6570         state = drm_atomic_helper_suspend(dev);
6571         ret = PTR_ERR_OR_ZERO(state);
6572         if (ret)
6573                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6574         else
6575                 dev_priv->modeset_restore_state = state;
6576         return ret;
6577 }
6578
/* Tear down the DRM core encoder state, then free the wrapping struct. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6586
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Hardware says the connector is enabled. */
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors share encoders; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says the connector is off; sw state must agree. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6625
6626 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6627 {
6628         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6629                 return crtc_state->fdi_lanes;
6630
6631         return 0;
6632 }
6633
/*
 * Validate the FDI lane count requested for @pipe against platform limits
 * and against the other pipes that share FDI bandwidth (the IVB three-pipe
 * sharing rules between pipes B and C in particular).
 * Returns 0 when valid, -EINVAL when not, or a PTR_ERR from acquiring the
 * other crtc's state (e.g. -EDEADLK).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are capped at 2 lanes regardless of pipe. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no FDI sharing constraints beyond the above. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only works if pipe C uses no FDI lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only get lanes if pipe B uses at most 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6705
/* Sentinel return value asking the caller to recompute the config. */
#define RETRY 1
/*
 * Compute the FDI lane count and link M/N values for this mode.  If the
 * lane check fails with -EINVAL, progressively drop pipe bpp (down to 18)
 * and retry; returns RETRY when the caller must recompute with the reduced
 * bpp, 0 on success, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK must be propagated untouched for lock backoff. */
	if (ret == -EDEADLK)
		return ret;

	/* Over bandwidth: shave 2 bits per component and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6754
6755 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6756 {
6757         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6758         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6759
6760         /* IPS only exists on ULT machines and is tied to pipe A. */
6761         if (!hsw_crtc_supports_ips(crtc))
6762                 return false;
6763
6764         if (!i915_modparams.enable_ips)
6765                 return false;
6766
6767         if (crtc_state->pipe_bpp > 24)
6768                 return false;
6769
6770         /*
6771          * We compare against max which means we must take
6772          * the increased cdclk requirement into account when
6773          * calculating the new cdclk.
6774          *
6775          * Should measure whether using a lower cdclk w/o IPS
6776          */
6777         if (IS_BROADWELL(dev_priv) &&
6778             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6779                 return false;
6780
6781         return true;
6782 }
6783
6784 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6785 {
6786         struct drm_i915_private *dev_priv =
6787                 to_i915(crtc_state->base.crtc->dev);
6788         struct intel_atomic_state *intel_state =
6789                 to_intel_atomic_state(crtc_state->base.state);
6790
6791         if (!hsw_crtc_state_ips_capable(crtc_state))
6792                 return false;
6793
6794         if (crtc_state->ips_force_disable)
6795                 return false;
6796
6797         /* IPS should be fine as long as at least one plane is enabled. */
6798         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6799                 return false;
6800
6801         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6802         if (IS_BROADWELL(dev_priv) &&
6803             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6804                 return false;
6805
6806         return true;
6807 }
6808
6809 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6810 {
6811         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6812
6813         /* GDG double wide on either pipe, otherwise pipe A only */
6814         return INTEL_GEN(dev_priv) < 4 &&
6815                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6816 }
6817
6818 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6819 {
6820         u32 pixel_rate;
6821
6822         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6823
6824         /*
6825          * We only use IF-ID interlacing. If we ever use
6826          * PF-ID we'll need to adjust the pixel_rate here.
6827          */
6828
6829         if (pipe_config->pch_pfit.enabled) {
6830                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6831                 u32 pfit_size = pipe_config->pch_pfit.size;
6832
6833                 pipe_w = pipe_config->pipe_src_w;
6834                 pipe_h = pipe_config->pipe_src_h;
6835
6836                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6837                 pfit_h = pfit_size & 0xFFFF;
6838                 if (pipe_w < pfit_w)
6839                         pipe_w = pfit_w;
6840                 if (pipe_h < pfit_h)
6841                         pipe_h = pfit_h;
6842
6843                 if (WARN_ON(!pfit_w || !pfit_h))
6844                         return pixel_rate;
6845
6846                 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6847                                      pfit_w * pfit_h);
6848         }
6849
6850         return pixel_rate;
6851 }
6852
6853 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6854 {
6855         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6856
6857         if (HAS_GMCH(dev_priv))
6858                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6859                 crtc_state->pixel_rate =
6860                         crtc_state->base.adjusted_mode.crtc_clock;
6861         else
6862                 crtc_state->pixel_rate =
6863                         ilk_pipe_pixel_rate(crtc_state);
6864 }
6865
/*
 * Validate and finalize the crtc configuration: dotclock limits (enabling
 * double wide on pre-gen4 when needed), YCbCr/CTM exclusivity, even-width
 * constraints, the Cantiga+ hsync-front-porch erratum, pixel rate, and
 * finally the FDI configuration for PCH-attached outputs.
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6940
6941 static void
6942 intel_reduce_m_n_ratio(u32 *num, u32 *den)
6943 {
6944         while (*num > DATA_LINK_M_N_MASK ||
6945                *den > DATA_LINK_M_N_MASK) {
6946                 *num >>= 1;
6947                 *den >>= 1;
6948         }
6949 }
6950
6951 static void compute_m_n(unsigned int m, unsigned int n,
6952                         u32 *ret_m, u32 *ret_n,
6953                         bool constant_n)
6954 {
6955         /*
6956          * Several DP dongles in particular seem to be fussy about
6957          * too large link M/N values. Give N value as 0x8000 that
6958          * should be acceptable by specific devices. 0x8000 is the
6959          * specified fixed N value for asynchronous clock mode,
6960          * which the devices expect also in synchronous clock mode.
6961          */
6962         if (constant_n)
6963                 *ret_n = 0x8000;
6964         else
6965                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6966
6967         *ret_m = div_u64((u64)m * *ret_n, n);
6968         intel_reduce_m_n_ratio(ret_m, ret_n);
6969 }
6970
6971 void
6972 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
6973                        int pixel_clock, int link_clock,
6974                        struct intel_link_m_n *m_n,
6975                        bool constant_n)
6976 {
6977         m_n->tu = 64;
6978
6979         compute_m_n(bits_per_pixel * pixel_clock,
6980                     link_clock * nlanes * 8,
6981                     &m_n->gmch_m, &m_n->gmch_n,
6982                     constant_n);
6983
6984         compute_m_n(pixel_clock, link_clock,
6985                     &m_n->link_m, &m_n->link_n,
6986                     constant_n);
6987 }
6988
6989 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6990 {
6991         if (i915_modparams.panel_use_ssc >= 0)
6992                 return i915_modparams.panel_use_ssc != 0;
6993         return dev_priv->vbt.lvds_use_ssc
6994                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6995 }
6996
6997 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
6998 {
6999         return (1 << dpll->n) << 16 | dpll->m2;
7000 }
7001
7002 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7003 {
7004         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7005 }
7006
7007 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7008                                      struct intel_crtc_state *crtc_state,
7009                                      struct dpll *reduced_clock)
7010 {
7011         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7012         u32 fp, fp2 = 0;
7013
7014         if (IS_PINEVIEW(dev_priv)) {
7015                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7016                 if (reduced_clock)
7017                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7018         } else {
7019                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7020                 if (reduced_clock)
7021                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7022         }
7023
7024         crtc_state->dpll_hw_state.fp0 = fp;
7025
7026         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7027             reduced_clock) {
7028                 crtc_state->dpll_hw_state.fp1 = fp2;
7029         } else {
7030                 crtc_state->dpll_hw_state.fp1 = fp;
7031         }
7032 }
7033
/*
 * Work around PLLB opamp miscalibration on VLV via sideband DPIO
 * register accesses. The exact values written are magic numbers from
 * the hardware programming notes; the read-modify-write order below
 * is significant.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        /* NOTE(review): presumably enables the manual opamp override — confirm against PRM */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        /* Clear the low byte of PLL_DW9 again, leaving the override active. */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        /* Final REF_DW13 value differs in the top byte (0xb0 vs 0x8c). */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7062
/*
 * Program the PCH transcoder data (M1/N1) and link (M1/N1) ratio
 * registers for the pipe driving @crtc_state.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* TU size shares the data M register with the M value. */
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7075
7076 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7077                                  enum transcoder transcoder)
7078 {
7079         if (IS_HASWELL(dev_priv))
7080                 return transcoder == TRANSCODER_EDP;
7081
7082         /*
7083          * Strictly speaking some registers are available before
7084          * gen7, but we only support DRRS on gen7+
7085          */
7086         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7087 }
7088
/*
 * Program the CPU transcoder data and link M/N registers. On gen5+ the
 * registers are indexed by transcoder; on older (G4X-style) hardware
 * they are indexed by pipe and there is no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
                /*
                 * M2_N2 registers are set only if DRRS is supported
                 * (to make sure the registers are not unnecessarily accessed).
                 */
                if (m2_n2 && crtc_state->has_drrs &&
                    transcoder_has_m2_n2(dev_priv, transcoder)) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
                        I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
                        I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
                }
        } else {
                /* Pre-gen5: pipe-indexed G4X registers, M1/N1 only. */
                I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
                I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
7122
7123 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7124 {
7125         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7126
7127         if (m_n == M1_N1) {
7128                 dp_m_n = &crtc_state->dp_m_n;
7129                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7130         } else if (m_n == M2_N2) {
7131
7132                 /*
7133                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7134                  * needs to be programmed into M1_N1.
7135                  */
7136                 dp_m_n = &crtc_state->dp_m2_n2;
7137         } else {
7138                 DRM_ERROR("Unsupported divider value\n");
7139                 return;
7140         }
7141
7142         if (crtc_state->has_pch_encoder)
7143                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7144         else
7145                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7146 }
7147
7148 static void vlv_compute_dpll(struct intel_crtc *crtc,
7149                              struct intel_crtc_state *pipe_config)
7150 {
7151         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7152                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7153         if (crtc->pipe != PIPE_A)
7154                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7155
7156         /* DPLL not used with DSI, but still need the rest set up */
7157         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7158                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7159                         DPLL_EXT_BUFFER_ENABLE_VLV;
7160
7161         pipe_config->dpll_hw_state.dpll_md =
7162                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7163 }
7164
7165 static void chv_compute_dpll(struct intel_crtc *crtc,
7166                              struct intel_crtc_state *pipe_config)
7167 {
7168         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7169                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7170         if (crtc->pipe != PIPE_A)
7171                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7172
7173         /* DPLL not used with DSI, but still need the rest set up */
7174         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7175                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7176
7177         pipe_config->dpll_hw_state.dpll_md =
7178                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7179 }
7180
/*
 * Program the VLV DPLL dividers and analog tuning values via sideband
 * DPIO before the PLL is enabled. Skipped (beyond enabling the refclk)
 * for DSI, where the DPLL itself is unused. The hex values written
 * below are magic numbers from the hardware programming notes; do not
 * reorder the writes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;

        /* Enable Refclk */
        I915_WRITE(DPLL(pipe),
                   pipe_config->dpll_hw_state.dpll &
                   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* Sideband accesses below must be serialized. */
        mutex_lock(&dev_priv->sb_lock);

        bestn = pipe_config->dpll.n;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /* See eDP HDMI DPIO driver vbios notes doc */

        /* PLL B needs special handling */
        if (pipe == PIPE_B)
                vlv_pllb_recal_opamp(dev_priv, pipe);

        /* Set up Tx target for periodic Rcomp update */
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

        /* Disable target IRef on PLL */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

        /* Disable fast lock */
        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
        mdiv |= (1 << DPIO_K_SHIFT);

        /*
         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
         * but we don't support that).
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Dividers are written first, then calibration is enabled on top. */
        mdiv |= DPIO_ENABLE_CALIBRATION;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }

        /* Core clock control: preserve bits 8-15, set the rest as below. */
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
        mutex_unlock(&dev_priv->sb_lock);
}
7279
/*
 * Program the CHV PLL dividers, fractional M2, lock-detect threshold
 * and loop filter via sideband DPIO before the PLL is enabled. Skipped
 * (beyond enabling refclk/SSC) for DSI. Values and ordering follow the
 * hardware programming notes; do not reorder the writes.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        u32 dpio_val;
        int vco;

        /* Enable Refclk and SSC */
        I915_WRITE(DPLL(pipe),
                   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* M2 carries a 22-bit fractional part below the integer bits. */
        bestn = pipe_config->dpll.n;
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2 >> 22;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;
        vco = pipe_config->dpll.vco;
        dpio_val = 0;
        loopfilter = 0;

        /* Sideband accesses below must be serialized. */
        mutex_lock(&dev_priv->sb_lock);

        /* p1 and p2 divider */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
                        5 << DPIO_CHV_S1_DIV_SHIFT |
                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
                        1 << DPIO_CHV_K_DIV_SHIFT);

        /* Feedback post-divider - m2 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

        /* Feedback refclk divider - n and m1 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
                        DPIO_CHV_M1_DIV_BY_2 |
                        1 << DPIO_CHV_N_DIV_SHIFT);

        /* M2 fraction division */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

        /* M2 fraction division enable */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
        dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
        if (bestm2_frac)
                dpio_val |= DPIO_CHV_FRAC_DIV_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

        /* Program digital lock detect threshold */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
        dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
                                        DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
        dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
        if (!bestm2_frac)
                dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

        /* Loop filter */
        if (vco == 5400000) {
                loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6200000) {
                loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6480000) {
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x8;
        } else {
                /* Not supported. Apply the same limits as in the max case */
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0;
        }
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
        dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
        dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

        /* AFC Recal */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
                        DPIO_AFC_RECAL);

        mutex_unlock(&dev_priv->sb_lock);
}
7384
7385 /**
7386  * vlv_force_pll_on - forcibly enable just the PLL
7387  * @dev_priv: i915 private structure
7388  * @pipe: pipe PLL to enable
7389  * @dpll: PLL configuration
7390  *
7391  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7392  * in cases where we need the PLL enabled even when @pipe is not going to
7393  * be enabled.
7394  */
7395 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7396                      const struct dpll *dpll)
7397 {
7398         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7399         struct intel_crtc_state *pipe_config;
7400
7401         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7402         if (!pipe_config)
7403                 return -ENOMEM;
7404
7405         pipe_config->base.crtc = &crtc->base;
7406         pipe_config->pixel_multiplier = 1;
7407         pipe_config->dpll = *dpll;
7408
7409         if (IS_CHERRYVIEW(dev_priv)) {
7410                 chv_compute_dpll(crtc, pipe_config);
7411                 chv_prepare_pll(crtc, pipe_config);
7412                 chv_enable_pll(crtc, pipe_config);
7413         } else {
7414                 vlv_compute_dpll(crtc, pipe_config);
7415                 vlv_prepare_pll(crtc, pipe_config);
7416                 vlv_enable_pll(crtc, pipe_config);
7417         }
7418
7419         kfree(pipe_config);
7420
7421         return 0;
7422 }
7423
7424 /**
7425  * vlv_force_pll_off - forcibly disable just the PLL
7426  * @dev_priv: i915 private structure
7427  * @pipe: pipe PLL to disable
7428  *
7429  * Disable the PLL for @pipe. To be used in cases where we need
7430  * the PLL enabled even when @pipe is not going to be enabled.
7431  */
7432 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7433 {
7434         if (IS_CHERRYVIEW(dev_priv))
7435                 chv_disable_pll(dev_priv, pipe);
7436         else
7437                 vlv_disable_pll(dev_priv, pipe);
7438 }
7439
7440 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7441                               struct intel_crtc_state *crtc_state,
7442                               struct dpll *reduced_clock)
7443 {
7444         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7445         u32 dpll;
7446         struct dpll *clock = &crtc_state->dpll;
7447
7448         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7449
7450         dpll = DPLL_VGA_MODE_DIS;
7451
7452         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7453                 dpll |= DPLLB_MODE_LVDS;
7454         else
7455                 dpll |= DPLLB_MODE_DAC_SERIAL;
7456
7457         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7458             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7459                 dpll |= (crtc_state->pixel_multiplier - 1)
7460                         << SDVO_MULTIPLIER_SHIFT_HIRES;
7461         }
7462
7463         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7464             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7465                 dpll |= DPLL_SDVO_HIGH_SPEED;
7466
7467         if (intel_crtc_has_dp_encoder(crtc_state))
7468                 dpll |= DPLL_SDVO_HIGH_SPEED;
7469
7470         /* compute bitmask from p1 value */
7471         if (IS_PINEVIEW(dev_priv))
7472                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7473         else {
7474                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7475                 if (IS_G4X(dev_priv) && reduced_clock)
7476                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7477         }
7478         switch (clock->p2) {
7479         case 5:
7480                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7481                 break;
7482         case 7:
7483                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7484                 break;
7485         case 10:
7486                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7487                 break;
7488         case 14:
7489                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7490                 break;
7491         }
7492         if (INTEL_GEN(dev_priv) >= 4)
7493                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7494
7495         if (crtc_state->sdvo_tv_clock)
7496                 dpll |= PLL_REF_INPUT_TVCLKINBC;
7497         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7498                  intel_panel_use_ssc(dev_priv))
7499                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7500         else
7501                 dpll |= PLL_REF_INPUT_DREFCLK;
7502
7503         dpll |= DPLL_VCO_ENABLE;
7504         crtc_state->dpll_hw_state.dpll = dpll;
7505
7506         if (INTEL_GEN(dev_priv) >= 4) {
7507                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7508                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7509                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7510         }
7511 }
7512
7513 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7514                               struct intel_crtc_state *crtc_state,
7515                               struct dpll *reduced_clock)
7516 {
7517         struct drm_device *dev = crtc->base.dev;
7518         struct drm_i915_private *dev_priv = to_i915(dev);
7519         u32 dpll;
7520         struct dpll *clock = &crtc_state->dpll;
7521
7522         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7523
7524         dpll = DPLL_VGA_MODE_DIS;
7525
7526         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7527                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7528         } else {
7529                 if (clock->p1 == 2)
7530                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7531                 else
7532                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7533                 if (clock->p2 == 4)
7534                         dpll |= PLL_P2_DIVIDE_BY_4;
7535         }
7536
7537         if (!IS_I830(dev_priv) &&
7538             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7539                 dpll |= DPLL_DVO_2X_MODE;
7540
7541         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7542             intel_panel_use_ssc(dev_priv))
7543                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7544         else
7545                 dpll |= PLL_REF_INPUT_DREFCLK;
7546
7547         dpll |= DPLL_VCO_ENABLE;
7548         crtc_state->dpll_hw_state.dpll = dpll;
7549 }
7550
/*
 * Program the transcoder timing registers (H/V total, blank, sync and
 * vsyncshift) from the adjusted mode in @crtc_state. All hardware
 * fields are <value - 1> encoded; interlaced modes get their vtotal and
 * vblank_end adjusted because the chip adds the extra halflines itself.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
        u32 crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                /* Keep vsyncshift non-negative by wrapping into the line. */
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /* VSYNCSHIFT only exists on gen4+. */
        if (INTEL_GEN(dev_priv) > 3)
                I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

        I915_WRITE(HTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7612
7613 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7614 {
7615         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7616         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7617         enum pipe pipe = crtc->pipe;
7618
7619         /* pipesrc controls the size that is scaled from, which should
7620          * always be the user's requested size.
7621          */
7622         I915_WRITE(PIPESRC(pipe),
7623                    ((crtc_state->pipe_src_w - 1) << 16) |
7624                    (crtc_state->pipe_src_h - 1));
7625 }
7626
7627 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7628                                    struct intel_crtc_state *pipe_config)
7629 {
7630         struct drm_device *dev = crtc->base.dev;
7631         struct drm_i915_private *dev_priv = to_i915(dev);
7632         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7633         u32 tmp;
7634
7635         tmp = I915_READ(HTOTAL(cpu_transcoder));
7636         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7637         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7638         tmp = I915_READ(HBLANK(cpu_transcoder));
7639         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7640         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7641         tmp = I915_READ(HSYNC(cpu_transcoder));
7642         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7643         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7644
7645         tmp = I915_READ(VTOTAL(cpu_transcoder));
7646         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7647         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7648         tmp = I915_READ(VBLANK(cpu_transcoder));
7649         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7650         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7651         tmp = I915_READ(VSYNC(cpu_transcoder));
7652         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7653         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7654
7655         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7656                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7657                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7658                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7659         }
7660 }
7661
7662 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7663                                     struct intel_crtc_state *pipe_config)
7664 {
7665         struct drm_device *dev = crtc->base.dev;
7666         struct drm_i915_private *dev_priv = to_i915(dev);
7667         u32 tmp;
7668
7669         tmp = I915_READ(PIPESRC(crtc->pipe));
7670         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7671         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7672
7673         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7674         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7675 }
7676
7677 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7678                                  struct intel_crtc_state *pipe_config)
7679 {
7680         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7681         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7682         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7683         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7684
7685         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7686         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7687         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7688         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7689
7690         mode->flags = pipe_config->base.adjusted_mode.flags;
7691         mode->type = DRM_MODE_TYPE_DRIVER;
7692
7693         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7694
7695         mode->hsync = drm_mode_hsync(mode);
7696         mode->vrefresh = drm_mode_vrefresh(mode);
7697         drm_mode_set_name(mode);
7698 }
7699
7700 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7701 {
7702         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7703         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7704         u32 pipeconf;
7705
7706         pipeconf = 0;
7707
7708         /* we keep both pipes enabled on 830 */
7709         if (IS_I830(dev_priv))
7710                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7711
7712         if (crtc_state->double_wide)
7713                 pipeconf |= PIPECONF_DOUBLE_WIDE;
7714
7715         /* only g4x and later have fancy bpc/dither controls */
7716         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7717             IS_CHERRYVIEW(dev_priv)) {
7718                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7719                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7720                         pipeconf |= PIPECONF_DITHER_EN |
7721                                     PIPECONF_DITHER_TYPE_SP;
7722
7723                 switch (crtc_state->pipe_bpp) {
7724                 case 18:
7725                         pipeconf |= PIPECONF_6BPC;
7726                         break;
7727                 case 24:
7728                         pipeconf |= PIPECONF_8BPC;
7729                         break;
7730                 case 30:
7731                         pipeconf |= PIPECONF_10BPC;
7732                         break;
7733                 default:
7734                         /* Case prevented by intel_choose_pipe_bpp_dither. */
7735                         BUG();
7736                 }
7737         }
7738
7739         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7740                 if (INTEL_GEN(dev_priv) < 4 ||
7741                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7742                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7743                 else
7744                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7745         } else
7746                 pipeconf |= PIPECONF_PROGRESSIVE;
7747
7748         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7749              crtc_state->limited_color_range)
7750                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7751
7752         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7753         POSTING_READ(PIPECONF(crtc->pipe));
7754 }
7755
7756 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7757                                    struct intel_crtc_state *crtc_state)
7758 {
7759         struct drm_device *dev = crtc->base.dev;
7760         struct drm_i915_private *dev_priv = to_i915(dev);
7761         const struct intel_limit *limit;
7762         int refclk = 48000;
7763
7764         memset(&crtc_state->dpll_hw_state, 0,
7765                sizeof(crtc_state->dpll_hw_state));
7766
7767         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7768                 if (intel_panel_use_ssc(dev_priv)) {
7769                         refclk = dev_priv->vbt.lvds_ssc_freq;
7770                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7771                 }
7772
7773                 limit = &intel_limits_i8xx_lvds;
7774         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7775                 limit = &intel_limits_i8xx_dvo;
7776         } else {
7777                 limit = &intel_limits_i8xx_dac;
7778         }
7779
7780         if (!crtc_state->clock_set &&
7781             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7782                                  refclk, NULL, &crtc_state->dpll)) {
7783                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7784                 return -EINVAL;
7785         }
7786
7787         i8xx_compute_dpll(crtc, crtc_state, NULL);
7788
7789         return 0;
7790 }
7791
7792 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7793                                   struct intel_crtc_state *crtc_state)
7794 {
7795         struct drm_device *dev = crtc->base.dev;
7796         struct drm_i915_private *dev_priv = to_i915(dev);
7797         const struct intel_limit *limit;
7798         int refclk = 96000;
7799
7800         memset(&crtc_state->dpll_hw_state, 0,
7801                sizeof(crtc_state->dpll_hw_state));
7802
7803         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7804                 if (intel_panel_use_ssc(dev_priv)) {
7805                         refclk = dev_priv->vbt.lvds_ssc_freq;
7806                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7807                 }
7808
7809                 if (intel_is_dual_link_lvds(dev))
7810                         limit = &intel_limits_g4x_dual_channel_lvds;
7811                 else
7812                         limit = &intel_limits_g4x_single_channel_lvds;
7813         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7814                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7815                 limit = &intel_limits_g4x_hdmi;
7816         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7817                 limit = &intel_limits_g4x_sdvo;
7818         } else {
7819                 /* The option is for other outputs */
7820                 limit = &intel_limits_i9xx_sdvo;
7821         }
7822
7823         if (!crtc_state->clock_set &&
7824             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7825                                 refclk, NULL, &crtc_state->dpll)) {
7826                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7827                 return -EINVAL;
7828         }
7829
7830         i9xx_compute_dpll(crtc, crtc_state, NULL);
7831
7832         return 0;
7833 }
7834
7835 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7836                                   struct intel_crtc_state *crtc_state)
7837 {
7838         struct drm_device *dev = crtc->base.dev;
7839         struct drm_i915_private *dev_priv = to_i915(dev);
7840         const struct intel_limit *limit;
7841         int refclk = 96000;
7842
7843         memset(&crtc_state->dpll_hw_state, 0,
7844                sizeof(crtc_state->dpll_hw_state));
7845
7846         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7847                 if (intel_panel_use_ssc(dev_priv)) {
7848                         refclk = dev_priv->vbt.lvds_ssc_freq;
7849                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7850                 }
7851
7852                 limit = &intel_limits_pineview_lvds;
7853         } else {
7854                 limit = &intel_limits_pineview_sdvo;
7855         }
7856
7857         if (!crtc_state->clock_set &&
7858             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7859                                 refclk, NULL, &crtc_state->dpll)) {
7860                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7861                 return -EINVAL;
7862         }
7863
7864         i9xx_compute_dpll(crtc, crtc_state, NULL);
7865
7866         return 0;
7867 }
7868
7869 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7870                                    struct intel_crtc_state *crtc_state)
7871 {
7872         struct drm_device *dev = crtc->base.dev;
7873         struct drm_i915_private *dev_priv = to_i915(dev);
7874         const struct intel_limit *limit;
7875         int refclk = 96000;
7876
7877         memset(&crtc_state->dpll_hw_state, 0,
7878                sizeof(crtc_state->dpll_hw_state));
7879
7880         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7881                 if (intel_panel_use_ssc(dev_priv)) {
7882                         refclk = dev_priv->vbt.lvds_ssc_freq;
7883                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7884                 }
7885
7886                 limit = &intel_limits_i9xx_lvds;
7887         } else {
7888                 limit = &intel_limits_i9xx_sdvo;
7889         }
7890
7891         if (!crtc_state->clock_set &&
7892             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7893                                  refclk, NULL, &crtc_state->dpll)) {
7894                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7895                 return -EINVAL;
7896         }
7897
7898         i9xx_compute_dpll(crtc, crtc_state, NULL);
7899
7900         return 0;
7901 }
7902
7903 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7904                                   struct intel_crtc_state *crtc_state)
7905 {
7906         int refclk = 100000;
7907         const struct intel_limit *limit = &intel_limits_chv;
7908
7909         memset(&crtc_state->dpll_hw_state, 0,
7910                sizeof(crtc_state->dpll_hw_state));
7911
7912         if (!crtc_state->clock_set &&
7913             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7914                                 refclk, NULL, &crtc_state->dpll)) {
7915                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7916                 return -EINVAL;
7917         }
7918
7919         chv_compute_dpll(crtc, crtc_state);
7920
7921         return 0;
7922 }
7923
7924 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7925                                   struct intel_crtc_state *crtc_state)
7926 {
7927         int refclk = 100000;
7928         const struct intel_limit *limit = &intel_limits_vlv;
7929
7930         memset(&crtc_state->dpll_hw_state, 0,
7931                sizeof(crtc_state->dpll_hw_state));
7932
7933         if (!crtc_state->clock_set &&
7934             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7935                                 refclk, NULL, &crtc_state->dpll)) {
7936                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7937                 return -EINVAL;
7938         }
7939
7940         vlv_compute_dpll(crtc, crtc_state);
7941
7942         return 0;
7943 }
7944
7945 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7946                                  struct intel_crtc_state *pipe_config)
7947 {
7948         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7949         u32 tmp;
7950
7951         if (INTEL_GEN(dev_priv) <= 3 &&
7952             (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
7953                 return;
7954
7955         tmp = I915_READ(PFIT_CONTROL);
7956         if (!(tmp & PFIT_ENABLE))
7957                 return;
7958
7959         /* Check whether the pfit is attached to our pipe. */
7960         if (INTEL_GEN(dev_priv) < 4) {
7961                 if (crtc->pipe != PIPE_B)
7962                         return;
7963         } else {
7964                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7965                         return;
7966         }
7967
7968         pipe_config->gmch_pfit.control = tmp;
7969         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7970 }
7971
7972 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7973                                struct intel_crtc_state *pipe_config)
7974 {
7975         struct drm_device *dev = crtc->base.dev;
7976         struct drm_i915_private *dev_priv = to_i915(dev);
7977         int pipe = pipe_config->cpu_transcoder;
7978         struct dpll clock;
7979         u32 mdiv;
7980         int refclk = 100000;
7981
7982         /* In case of DSI, DPLL will not be used */
7983         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7984                 return;
7985
7986         mutex_lock(&dev_priv->sb_lock);
7987         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
7988         mutex_unlock(&dev_priv->sb_lock);
7989
7990         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7991         clock.m2 = mdiv & DPIO_M2DIV_MASK;
7992         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7993         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7994         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7995
7996         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
7997 }
7998
/*
 * Read out the current hardware configuration of the primary plane so
 * the firmware-programmed framebuffer can be described to the rest of
 * the driver. On success an intel_framebuffer is allocated and stored
 * in plane_config->fb along with tiling, rotation, base and size; on
 * any failure the function returns early and plane_config is left as
 * the caller initialized it.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read out if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and 180° rotation bits exist from gen4 onwards. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* Surface base/offset registers differ per generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/*
	 * Size the fb from PIPESRC; NOTE(review): this assumes the
	 * firmware fb spans the whole pipe source area — confirm.
	 */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8081
/*
 * Read the CHV DPLL divider words back over the DPIO sideband and
 * reconstruct the port clock for state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/*
	 * M2 is fixed-point: the integer part comes from DW0 shifted
	 * above a 22-bit fraction, taken from DW2 only when fractional
	 * mode is enabled in DW3.
	 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8115
8116 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8117                                         struct intel_crtc_state *pipe_config)
8118 {
8119         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8120         enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8121
8122         pipe_config->lspcon_downsampling = false;
8123
8124         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8125                 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8126
8127                 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8128                         bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8129                         bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8130
8131                         if (ycbcr420_enabled) {
8132                                 /* We support 4:2:0 in full blend mode only */
8133                                 if (!blend)
8134                                         output = INTEL_OUTPUT_FORMAT_INVALID;
8135                                 else if (!(IS_GEMINILAKE(dev_priv) ||
8136                                            INTEL_GEN(dev_priv) >= 10))
8137                                         output = INTEL_OUTPUT_FORMAT_INVALID;
8138                                 else
8139                                         output = INTEL_OUTPUT_FORMAT_YCBCR420;
8140                         } else {
8141                                 /*
8142                                  * Currently there is no interface defined to
8143                                  * check user preference between RGB/YCBCR444
8144                                  * or YCBCR420. So the only possible case for
8145                                  * YCBCR444 usage is driving YCBCR420 output
8146                                  * with LSPCON, when pipe is configured for
8147                                  * YCBCR444 output and LSPCON takes care of
8148                                  * downsampling it.
8149                                  */
8150                                 pipe_config->lspcon_downsampling = true;
8151                                 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8152                         }
8153                 }
8154         }
8155
8156         pipe_config->output_format = output;
8157 }
8158
/*
 * Read the full hardware state of an i9xx-family pipe into
 * @pipe_config. Returns true iff the pipe is enabled. A display power
 * wakeref for the pipe's domain is held across the readout; if the
 * domain is already powered down the function returns false without
 * touching any registers.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* Defaults for fields not read out below. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV expose bpc in PIPECONF (cf. i9xx_set_pipeconf). */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier lives in different registers per generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Reconstruct the port clock from the platform's DPLL layout. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
8275
8276 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8277 {
8278         struct intel_encoder *encoder;
8279         int i;
8280         u32 val, final;
8281         bool has_lvds = false;
8282         bool has_cpu_edp = false;
8283         bool has_panel = false;
8284         bool has_ck505 = false;
8285         bool can_ssc = false;
8286         bool using_ssc_source = false;
8287
8288         /* We need to take the global config into account */
8289         for_each_intel_encoder(&dev_priv->drm, encoder) {
8290                 switch (encoder->type) {
8291                 case INTEL_OUTPUT_LVDS:
8292                         has_panel = true;
8293                         has_lvds = true;
8294                         break;
8295                 case INTEL_OUTPUT_EDP:
8296                         has_panel = true;
8297                         if (encoder->port == PORT_A)
8298                                 has_cpu_edp = true;
8299                         break;
8300                 default:
8301                         break;
8302                 }
8303         }
8304
8305         if (HAS_PCH_IBX(dev_priv)) {
8306                 has_ck505 = dev_priv->vbt.display_clock_mode;
8307                 can_ssc = has_ck505;
8308         } else {
8309                 has_ck505 = false;
8310                 can_ssc = true;
8311         }
8312
8313         /* Check if any DPLLs are using the SSC source */
8314         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8315                 u32 temp = I915_READ(PCH_DPLL(i));
8316
8317                 if (!(temp & DPLL_VCO_ENABLE))
8318                         continue;
8319
8320                 if ((temp & PLL_REF_INPUT_MASK) ==
8321                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8322                         using_ssc_source = true;
8323                         break;
8324                 }
8325         }
8326
8327         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8328                       has_panel, has_lvds, has_ck505, using_ssc_source);
8329
8330         /* Ironlake: try to setup display ref clock before DPLL
8331          * enabling. This is only under driver's control after
8332          * PCH B stepping, previous chipset stepping should be
8333          * ignoring this setting.
8334          */
8335         val = I915_READ(PCH_DREF_CONTROL);
8336
8337         /* As we must carefully and slowly disable/enable each source in turn,
8338          * compute the final state we want first and check if we need to
8339          * make any changes at all.
8340          */
8341         final = val;
8342         final &= ~DREF_NONSPREAD_SOURCE_MASK;
8343         if (has_ck505)
8344                 final |= DREF_NONSPREAD_CK505_ENABLE;
8345         else
8346                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8347
8348         final &= ~DREF_SSC_SOURCE_MASK;
8349         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8350         final &= ~DREF_SSC1_ENABLE;
8351
8352         if (has_panel) {
8353                 final |= DREF_SSC_SOURCE_ENABLE;
8354
8355                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8356                         final |= DREF_SSC1_ENABLE;
8357
8358                 if (has_cpu_edp) {
8359                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
8360                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8361                         else
8362                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8363                 } else
8364                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8365         } else if (using_ssc_source) {
8366                 final |= DREF_SSC_SOURCE_ENABLE;
8367                 final |= DREF_SSC1_ENABLE;
8368         }
8369
8370         if (final == val)
8371                 return;
8372
8373         /* Always enable nonspread source */
8374         val &= ~DREF_NONSPREAD_SOURCE_MASK;
8375
8376         if (has_ck505)
8377                 val |= DREF_NONSPREAD_CK505_ENABLE;
8378         else
8379                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8380
8381         if (has_panel) {
8382                 val &= ~DREF_SSC_SOURCE_MASK;
8383                 val |= DREF_SSC_SOURCE_ENABLE;
8384
8385                 /* SSC must be turned on before enabling the CPU output  */
8386                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8387                         DRM_DEBUG_KMS("Using SSC on panel\n");
8388                         val |= DREF_SSC1_ENABLE;
8389                 } else
8390                         val &= ~DREF_SSC1_ENABLE;
8391
8392                 /* Get SSC going before enabling the outputs */
8393                 I915_WRITE(PCH_DREF_CONTROL, val);
8394                 POSTING_READ(PCH_DREF_CONTROL);
8395                 udelay(200);
8396
8397                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8398
8399                 /* Enable CPU source on CPU attached eDP */
8400                 if (has_cpu_edp) {
8401                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8402                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
8403                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8404                         } else
8405                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8406                 } else
8407                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8408
8409                 I915_WRITE(PCH_DREF_CONTROL, val);
8410                 POSTING_READ(PCH_DREF_CONTROL);
8411                 udelay(200);
8412         } else {
8413                 DRM_DEBUG_KMS("Disabling CPU source output\n");
8414
8415                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8416
8417                 /* Turn off CPU output */
8418                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8419
8420                 I915_WRITE(PCH_DREF_CONTROL, val);
8421                 POSTING_READ(PCH_DREF_CONTROL);
8422                 udelay(200);
8423
8424                 if (!using_ssc_source) {
8425                         DRM_DEBUG_KMS("Disabling SSC source\n");
8426
8427                         /* Turn off the SSC source */
8428                         val &= ~DREF_SSC_SOURCE_MASK;
8429                         val |= DREF_SSC_SOURCE_DISABLE;
8430
8431                         /* Turn off SSC1 */
8432                         val &= ~DREF_SSC1_ENABLE;
8433
8434                         I915_WRITE(PCH_DREF_CONTROL, val);
8435                         POSTING_READ(PCH_DREF_CONTROL);
8436                         udelay(200);
8437                 }
8438         }
8439
8440         BUG_ON(val != final);
8441 }
8442
/* Pulse the FDI mPHY reset: assert via SOUTH_CHICKEN2, wait for the
 * status bit, then de-assert and wait for it to clear again. */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the mPHY reset through the IOSF sideband reset control bit. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Wait up to 100 us for hardware to report the reset as asserted. */
	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the reset again. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Wait up to 100 us for the status bit to clear. */
	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8463
/* WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY over the sideband interface. All offsets and
 * values are opaque workaround magic; the registers come in 0x20xx /
 * 0x21xx pairs, presumably one per FDI channel -- do not change any
 * value without checking the workaround definition in BSpec.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8538
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * @with_spread: enable downspread on the clock (required when @with_fdi)
 * @with_fdi: additionally reset and program the FDI mPHY (big PCH only)
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations instead of
	 * programming them into the hardware. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC while keeping PATHALT set.
	 * NOTE(review): PATHALT appears to act as an alternate-path/bypass
	 * bit that is dropped below once spread is wanted -- confirm
	 * against the BSpec iCLK chapter. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear PATHALT to let the spread clock through. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Set the buffer-enable config bit; its register location differs
	 * between LP and big PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8583
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable config bit; its register location differs
	 * between LP and big PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* Only gate the SSC if it is currently running; set PATHALT first
	 * (with a settle delay) before flipping DISABLE. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8609
/* Map a bend amount in steps (multiples of 5, range -50..50) to an index
 * into sscdivintphase[]: -50 -> 0, 0 -> 10, +50 -> 20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI SSCDIVINTPHASE low-word values per bend setting. Each value is
 * shared by two adjacent 5-step entries (the half step is handled via
 * SSCDITHPHASE in lpt_bend_clkout_dp()). Values are opaque magic --
 * presumably from BSpec; do not alter without checking there. */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8635
8636 /*
8637  * Bend CLKOUT_DP
8638  * steps -50 to 50 inclusive, in steps of 5
8639  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8640  * change in clock period = -(steps / 10) * 5.787 ps
8641  */
8642 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8643 {
8644         u32 tmp;
8645         int idx = BEND_IDX(steps);
8646
8647         if (WARN_ON(steps % 5 != 0))
8648                 return;
8649
8650         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8651                 return;
8652
8653         mutex_lock(&dev_priv->sb_lock);
8654
8655         if (steps % 10 != 0)
8656                 tmp = 0xAAAAAAAB;
8657         else
8658                 tmp = 0x00000000;
8659         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8660
8661         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8662         tmp &= 0xffff0000;
8663         tmp |= sscdivintphase[idx];
8664         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8665
8666         mutex_unlock(&dev_priv->sb_lock);
8667 }
8668
8669 #undef BEND_IDX
8670
8671 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8672 {
8673         struct intel_encoder *encoder;
8674         bool has_vga = false;
8675
8676         for_each_intel_encoder(&dev_priv->drm, encoder) {
8677                 switch (encoder->type) {
8678                 case INTEL_OUTPUT_ANALOG:
8679                         has_vga = true;
8680                         break;
8681                 default:
8682                         break;
8683                 }
8684         }
8685
8686         if (has_vga) {
8687                 lpt_bend_clkout_dp(dev_priv, 0);
8688                 lpt_enable_clkout_dp(dev_priv, true, true);
8689         } else {
8690                 lpt_disable_clkout_dp(dev_priv);
8691         }
8692 }
8693
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT use the Ironlake sequence; LPT has its own. */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8704
8705 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8706 {
8707         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8708         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8709         enum pipe pipe = crtc->pipe;
8710         u32 val;
8711
8712         val = 0;
8713
8714         switch (crtc_state->pipe_bpp) {
8715         case 18:
8716                 val |= PIPECONF_6BPC;
8717                 break;
8718         case 24:
8719                 val |= PIPECONF_8BPC;
8720                 break;
8721         case 30:
8722                 val |= PIPECONF_10BPC;
8723                 break;
8724         case 36:
8725                 val |= PIPECONF_12BPC;
8726                 break;
8727         default:
8728                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8729                 BUG();
8730         }
8731
8732         if (crtc_state->dither)
8733                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8734
8735         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8736                 val |= PIPECONF_INTERLACED_ILK;
8737         else
8738                 val |= PIPECONF_PROGRESSIVE;
8739
8740         if (crtc_state->limited_color_range)
8741                 val |= PIPECONF_COLOR_RANGE_SELECT;
8742
8743         I915_WRITE(PIPECONF(pipe), val);
8744         POSTING_READ(PIPECONF(pipe));
8745 }
8746
8747 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8748 {
8749         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8750         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8751         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8752         u32 val = 0;
8753
8754         if (IS_HASWELL(dev_priv) && crtc_state->dither)
8755                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8756
8757         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8758                 val |= PIPECONF_INTERLACED_ILK;
8759         else
8760                 val |= PIPECONF_PROGRESSIVE;
8761
8762         I915_WRITE(PIPECONF(cpu_transcoder), val);
8763         POSTING_READ(PIPECONF(cpu_transcoder));
8764 }
8765
8766 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8767 {
8768         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8769         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8770
8771         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8772                 u32 val = 0;
8773
8774                 switch (crtc_state->pipe_bpp) {
8775                 case 18:
8776                         val |= PIPEMISC_DITHER_6_BPC;
8777                         break;
8778                 case 24:
8779                         val |= PIPEMISC_DITHER_8_BPC;
8780                         break;
8781                 case 30:
8782                         val |= PIPEMISC_DITHER_10_BPC;
8783                         break;
8784                 case 36:
8785                         val |= PIPEMISC_DITHER_12_BPC;
8786                         break;
8787                 default:
8788                         /* Case prevented by pipe_config_set_bpp. */
8789                         BUG();
8790                 }
8791
8792                 if (crtc_state->dither)
8793                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8794
8795                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8796                     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8797                         val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8798
8799                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8800                         val |= PIPEMISC_YUV420_ENABLE |
8801                                 PIPEMISC_YUV420_MODE_FULL_BLEND;
8802
8803                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8804         }
8805 }
8806
/* Compute how many FDI lanes are needed to carry @target_clock at @bpp
 * over a link running at @link_bw (each lane carries link_bw * 8 bps). */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bw = link_bw * 8;

	/* Round up: a partially filled lane still has to be enabled. */
	return (bps + lane_bw - 1) / lane_bw;
}
8817
8818 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8819 {
8820         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8821 }
8822
/*
 * Compute the ILK-style DPLL, FP0 and FP1 register values from the
 * divider settings already stored in crtc_state->dpll, and stash them in
 * crtc_state->dpll_hw_state for later programming.
 *
 * @reduced_clock: optional divider set for a reduced clock; when given it
 *                 is used for FP1, otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same M-vs-factor*N tuning criterion as above. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Hardware stores the multiplier as (value - 1). */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Translate the p2 post divider into its register encoding. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Spread spectrum reference only for LVDS panels that use SSC. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8924
8925 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8926                                        struct intel_crtc_state *crtc_state)
8927 {
8928         struct drm_device *dev = crtc->base.dev;
8929         struct drm_i915_private *dev_priv = to_i915(dev);
8930         const struct intel_limit *limit;
8931         int refclk = 120000;
8932
8933         memset(&crtc_state->dpll_hw_state, 0,
8934                sizeof(crtc_state->dpll_hw_state));
8935
8936         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8937         if (!crtc_state->has_pch_encoder)
8938                 return 0;
8939
8940         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8941                 if (intel_panel_use_ssc(dev_priv)) {
8942                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8943                                       dev_priv->vbt.lvds_ssc_freq);
8944                         refclk = dev_priv->vbt.lvds_ssc_freq;
8945                 }
8946
8947                 if (intel_is_dual_link_lvds(dev)) {
8948                         if (refclk == 100000)
8949                                 limit = &intel_limits_ironlake_dual_lvds_100m;
8950                         else
8951                                 limit = &intel_limits_ironlake_dual_lvds;
8952                 } else {
8953                         if (refclk == 100000)
8954                                 limit = &intel_limits_ironlake_single_lvds_100m;
8955                         else
8956                                 limit = &intel_limits_ironlake_single_lvds;
8957                 }
8958         } else {
8959                 limit = &intel_limits_ironlake_dac;
8960         }
8961
8962         if (!crtc_state->clock_set &&
8963             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8964                                 refclk, NULL, &crtc_state->dpll)) {
8965                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8966                 return -EINVAL;
8967         }
8968
8969         ironlake_compute_dpll(crtc, crtc_state, NULL);
8970
8971         if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8972                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8973                               pipe_name(crtc->pipe));
8974                 return -EINVAL;
8975         }
8976
8977         return 0;
8978 }
8979
/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 values for
 * @crtc's pipe into @m_n. The TU size shares the DATA_M1 register with
 * the data M value.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* Strip the TU size bits out of the data M value. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* The TU size field is stored as (tu - 1) in hardware. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
8995
/*
 * Read back the link and data M/N values for a CPU transcoder (gen5+)
 * or, on older hardware, the per-pipe G4X-style registers.
 *
 * @m2_n2: optional; filled from the M2/N2 register set when the
 *         transcoder has one (the alternate link configuration --
 *         presumably used for downclocking; confirm with callers).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* Data M register also carries the TU size in its high bits. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored as (tu - 1) in hardware. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5: registers are addressed per pipe, not per
		 * transcoder. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9032
9033 void intel_dp_get_m_n(struct intel_crtc *crtc,
9034                       struct intel_crtc_state *pipe_config)
9035 {
9036         if (pipe_config->has_pch_encoder)
9037                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9038         else
9039                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9040                                              &pipe_config->dp_m_n,
9041                                              &pipe_config->dp_m2_n2);
9042 }
9043
/* Read back the FDI link M/N configuration from the CPU transcoder
 * registers; FDI has no M2/N2 set, hence the NULL. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9050
9051 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9052                                     struct intel_crtc_state *pipe_config)
9053 {
9054         struct drm_device *dev = crtc->base.dev;
9055         struct drm_i915_private *dev_priv = to_i915(dev);
9056         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9057         u32 ps_ctrl = 0;
9058         int id = -1;
9059         int i;
9060
9061         /* find scaler attached to this pipe */
9062         for (i = 0; i < crtc->num_scalers; i++) {
9063                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9064                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9065                         id = i;
9066                         pipe_config->pch_pfit.enabled = true;
9067                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9068                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9069                         scaler_state->scalers[i].in_use = true;
9070                         break;
9071                 }
9072         }
9073
9074         scaler_state->scaler_id = id;
9075         if (id >= 0) {
9076                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9077         } else {
9078                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9079         }
9080 }
9081
/*
 * Read back the current hardware state of the pipe's primary plane and
 * build an equivalent framebuffer description in @plane_config
 * (presumably so an already-scanned-out boot framebuffer can be taken
 * over -- confirm with callers). On success the allocated
 * intel_framebuffer is handed out via plane_config->fb.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read back if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The pixel format field mask changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* On GLK/gen10+ the alpha mode is read from PLANE_COLOR_CTL
	 * instead of PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling bits into a framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Keep only the page-aligned part of the surface address. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	/* NOTE(review): offset is read but never used afterwards --
	 * verify whether the register read is needed at all. */
	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* The hw stride is in units that depend on format/modifier. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9208
9209 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9210                                      struct intel_crtc_state *pipe_config)
9211 {
9212         struct drm_device *dev = crtc->base.dev;
9213         struct drm_i915_private *dev_priv = to_i915(dev);
9214         u32 tmp;
9215
9216         tmp = I915_READ(PF_CTL(crtc->pipe));
9217
9218         if (tmp & PF_ENABLE) {
9219                 pipe_config->pch_pfit.enabled = true;
9220                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9221                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9222
9223                 /* We currently do not free assignements of panel fitters on
9224                  * ivb/hsw (since we don't use the higher upscaling modes which
9225                  * differentiates them) so just WARN about this case for now. */
9226                 if (IS_GEN(dev_priv, 7)) {
9227                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9228                                 PF_PIPE_SEL_IVB(crtc->pipe));
9229                 }
9230         }
9231 }
9232
9233 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9234                                      struct intel_crtc_state *pipe_config)
9235 {
9236         struct drm_device *dev = crtc->base.dev;
9237         struct drm_i915_private *dev_priv = to_i915(dev);
9238         enum intel_display_power_domain power_domain;
9239         intel_wakeref_t wakeref;
9240         u32 tmp;
9241         bool ret;
9242
9243         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9244         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9245         if (!wakeref)
9246                 return false;
9247
9248         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9249         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9250         pipe_config->shared_dpll = NULL;
9251
9252         ret = false;
9253         tmp = I915_READ(PIPECONF(crtc->pipe));
9254         if (!(tmp & PIPECONF_ENABLE))
9255                 goto out;
9256
9257         switch (tmp & PIPECONF_BPC_MASK) {
9258         case PIPECONF_6BPC:
9259                 pipe_config->pipe_bpp = 18;
9260                 break;
9261         case PIPECONF_8BPC:
9262                 pipe_config->pipe_bpp = 24;
9263                 break;
9264         case PIPECONF_10BPC:
9265                 pipe_config->pipe_bpp = 30;
9266                 break;
9267         case PIPECONF_12BPC:
9268                 pipe_config->pipe_bpp = 36;
9269                 break;
9270         default:
9271                 break;
9272         }
9273
9274         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9275                 pipe_config->limited_color_range = true;
9276
9277         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9278                 struct intel_shared_dpll *pll;
9279                 enum intel_dpll_id pll_id;
9280
9281                 pipe_config->has_pch_encoder = true;
9282
9283                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9284                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9285                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
9286
9287                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9288
9289                 if (HAS_PCH_IBX(dev_priv)) {
9290                         /*
9291                          * The pipe->pch transcoder and pch transcoder->pll
9292                          * mapping is fixed.
9293                          */
9294                         pll_id = (enum intel_dpll_id) crtc->pipe;
9295                 } else {
9296                         tmp = I915_READ(PCH_DPLL_SEL);
9297                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9298                                 pll_id = DPLL_ID_PCH_PLL_B;
9299                         else
9300                                 pll_id= DPLL_ID_PCH_PLL_A;
9301                 }
9302
9303                 pipe_config->shared_dpll =
9304                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
9305                 pll = pipe_config->shared_dpll;
9306
9307                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9308                                                 &pipe_config->dpll_hw_state));
9309
9310                 tmp = pipe_config->dpll_hw_state.dpll;
9311                 pipe_config->pixel_multiplier =
9312                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9313                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9314
9315                 ironlake_pch_clock_get(crtc, pipe_config);
9316         } else {
9317                 pipe_config->pixel_multiplier = 1;
9318         }
9319
9320         intel_get_pipe_timings(crtc, pipe_config);
9321         intel_get_pipe_src_size(crtc, pipe_config);
9322
9323         ironlake_get_pfit_config(crtc, pipe_config);
9324
9325         ret = true;
9326
9327 out:
9328         intel_display_power_put(dev_priv, power_domain, wakeref);
9329
9330         return ret;
9331 }
9332
/*
 * Sanity check that the display is quiescent enough to disable LCPLL:
 * no active CRTCs, power well down, PLLs and backlight PWMs disabled,
 * interrupts off.  All checks are I915_STATE_WARNs, not enforcement.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* Every pipe must already be shut down. */
	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* Only Haswell has the second CPU backlight PWM register. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9367
9368 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9369 {
9370         if (IS_HASWELL(dev_priv))
9371                 return I915_READ(D_COMP_HSW);
9372         else
9373                 return I915_READ(D_COMP_BDW);
9374 }
9375
9376 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
9377 {
9378         if (IS_HASWELL(dev_priv)) {
9379                 mutex_lock(&dev_priv->pcu_lock);
9380                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9381                                             val))
9382                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9383                 mutex_unlock(&dev_priv->pcu_lock);
9384         } else {
9385                 I915_WRITE(D_COMP_BDW, val);
9386                 POSTING_READ(D_COMP_BDW);
9387         }
9388 }
9389
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: move the CD clock onto FCLK first so it keeps running
 *	while LCPLL is down.
 * @allow_power_down: finally set LCPLL_POWER_DOWN_ALLOW so the hardware
 *	may power the PLL down.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Reparent the CD clock to FCLK and wait 1 us for the ack. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	/* Disable the PLL and wait for the lock bit to clear. */
	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait out any in-progress RCOMP cycle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9441
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.  Inverse of hsw_disable_lcpll(); returns early if the PLL is
 * already locked and fully configured.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LCPLL_CTL);

	/* Already locked, powered and sourced from LCPLL: nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP (force compensation, clear the disable bit). */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	/* Enable the PLL and wait (up to 5 ms) for it to lock. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Move the CD clock back from FCLK to LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* CDCLK may have changed across the sequence; re-read and log it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9497
9498 /*
9499  * Package states C8 and deeper are really deep PC states that can only be
9500  * reached when all the devices on the system allow it, so even if the graphics
9501  * device allows PC8+, it doesn't mean the system will actually get to these
9502  * states. Our driver only allows PC8+ when going into runtime PM.
9503  *
9504  * The requirements for PC8+ are that all the outputs are disabled, the power
9505  * well is disabled and most interrupts are disabled, and these are also
9506  * requirements for runtime PM. When these conditions are met, we manually do
9507  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9508  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9509  * hang the machine.
9510  *
9511  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9512  * the state of some registers, so when we come back from PC8+ we need to
9513  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9514  * need to take care of the registers kept by RC6. Notice that this happens even
9515  * if we don't put the device in PCI D3 state (which is what currently happens
9516  * because of the runtime PM support).
9517  *
9518  * For more, read "Display Sequences for Package C8" on the hardware
9519  * documentation.
9520  */
9521 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9522 {
9523         u32 val;
9524
9525         DRM_DEBUG_KMS("Enabling package C8+\n");
9526
9527         if (HAS_PCH_LPT_LP(dev_priv)) {
9528                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9529                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9530                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9531         }
9532
9533         lpt_disable_clkout_dp(dev_priv);
9534         hsw_disable_lcpll(dev_priv, true, true);
9535 }
9536
9537 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9538 {
9539         u32 val;
9540
9541         DRM_DEBUG_KMS("Disabling package C8+\n");
9542
9543         hsw_restore_lcpll(dev_priv);
9544         lpt_init_pch_refclk(dev_priv);
9545
9546         if (HAS_PCH_LPT_LP(dev_priv)) {
9547                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9548                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9549                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9550         }
9551 }
9552
9553 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9554                                       struct intel_crtc_state *crtc_state)
9555 {
9556         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9557         struct intel_atomic_state *state =
9558                 to_intel_atomic_state(crtc_state->base.state);
9559
9560         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9561             IS_ICELAKE(dev_priv)) {
9562                 struct intel_encoder *encoder =
9563                         intel_get_crtc_new_encoder(state, crtc_state);
9564
9565                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9566                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9567                                       pipe_name(crtc->pipe));
9568                         return -EINVAL;
9569                 }
9570         }
9571
9572         return 0;
9573 }
9574
9575 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9576                                    enum port port,
9577                                    struct intel_crtc_state *pipe_config)
9578 {
9579         enum intel_dpll_id id;
9580         u32 temp;
9581
9582         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9583         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9584
9585         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9586                 return;
9587
9588         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9589 }
9590
9591 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9592                                 enum port port,
9593                                 struct intel_crtc_state *pipe_config)
9594 {
9595         enum intel_dpll_id id;
9596         u32 temp;
9597
9598         /* TODO: TBT pll not implemented. */
9599         if (intel_port_is_combophy(dev_priv, port)) {
9600                 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9601                        DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9602                 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9603
9604                 if (WARN_ON(!intel_dpll_is_combophy(id)))
9605                         return;
9606         } else if (intel_port_is_tc(dev_priv, port)) {
9607                 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9608         } else {
9609                 WARN(1, "Invalid port %x\n", port);
9610                 return;
9611         }
9612
9613         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9614 }
9615
9616 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9617                                 enum port port,
9618                                 struct intel_crtc_state *pipe_config)
9619 {
9620         enum intel_dpll_id id;
9621
9622         switch (port) {
9623         case PORT_A:
9624                 id = DPLL_ID_SKL_DPLL0;
9625                 break;
9626         case PORT_B:
9627                 id = DPLL_ID_SKL_DPLL1;
9628                 break;
9629         case PORT_C:
9630                 id = DPLL_ID_SKL_DPLL2;
9631                 break;
9632         default:
9633                 DRM_ERROR("Incorrect port type\n");
9634                 return;
9635         }
9636
9637         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9638 }
9639
9640 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9641                                 enum port port,
9642                                 struct intel_crtc_state *pipe_config)
9643 {
9644         enum intel_dpll_id id;
9645         u32 temp;
9646
9647         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9648         id = temp >> (port * 3 + 1);
9649
9650         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9651                 return;
9652
9653         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9654 }
9655
9656 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9657                                 enum port port,
9658                                 struct intel_crtc_state *pipe_config)
9659 {
9660         enum intel_dpll_id id;
9661         u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9662
9663         switch (ddi_pll_sel) {
9664         case PORT_CLK_SEL_WRPLL1:
9665                 id = DPLL_ID_WRPLL1;
9666                 break;
9667         case PORT_CLK_SEL_WRPLL2:
9668                 id = DPLL_ID_WRPLL2;
9669                 break;
9670         case PORT_CLK_SEL_SPLL:
9671                 id = DPLL_ID_SPLL;
9672                 break;
9673         case PORT_CLK_SEL_LCPLL_810:
9674                 id = DPLL_ID_LCPLL_810;
9675                 break;
9676         case PORT_CLK_SEL_LCPLL_1350:
9677                 id = DPLL_ID_LCPLL_1350;
9678                 break;
9679         case PORT_CLK_SEL_LCPLL_2700:
9680                 id = DPLL_ID_LCPLL_2700;
9681                 break;
9682         default:
9683                 MISSING_CASE(ddi_pll_sel);
9684                 /* fall through */
9685         case PORT_CLK_SEL_NONE:
9686                 return;
9687         }
9688
9689         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9690 }
9691
/*
 * Determine which CPU transcoder feeds @crtc (the fixed per-pipe one, or
 * the eDP / DSI panel transcoders), store it in @pipe_config, and grab
 * the matching transcoder power domain (recorded in @power_domain_mask
 * for the caller to release).
 *
 * Returns true if the selected transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	/* ICL has dedicated DSI transcoders in addition to eDP. */
	if (IS_ICELAKE(dev_priv))
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Which pipe is this panel transcoder wired to? */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = panel_transcoder;
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* Caller releases this reference via @power_domain_mask. */
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9774
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders
 * (DSI_A on port A, DSI_C on port C).  If so, record the transcoder in
 * @pipe_config.  Power domains taken here are recorded in
 * @power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder was found for this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Is this DSI port actually wired to our pipe? */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9824
/*
 * Read out which DDI port and shared DPLL drive @crtc's transcoder and
 * fill in the corresponding fields of @pipe_config, including the PCH/FDI
 * state for HSW/BDW port E.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Per-platform DPLL readout (newest platforms checked first). */
	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9870
/*
 * Read out the full hardware state of @crtc (HSW+) into @pipe_config.
 *
 * Power domains needed for the readout are acquired with
 * get_if_enabled and tracked in power_domain_mask; all of them are
 * released again before returning.
 *
 * Returns true and fills in @pipe_config if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT/GLK a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    IS_ICELAKE(dev_priv)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);
	intel_get_crtc_ycbcr_config(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* The panel fitter sits in a separately gated power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		WARN_ON(power_domain_mask & BIT_ULL(power_domain));
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* Pixel multiplier only exists on the non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put_unchecked(dev_priv, power_domain);

	return active;
}
9949
/*
 * Compute the address to program into CURBASE for the given cursor
 * plane state, including the surface offset and, on GMCH platforms,
 * the adjustment needed for 180 degree rotation.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	/* Some platforms address the cursor bo physically rather than via GGTT. */
	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->color_plane[0].offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		/* Step to the last pixel of the cursor image. */
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
9973
9974 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9975 {
9976         int x = plane_state->base.crtc_x;
9977         int y = plane_state->base.crtc_y;
9978         u32 pos = 0;
9979
9980         if (x < 0) {
9981                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9982                 x = -x;
9983         }
9984         pos |= x << CURSOR_X_SHIFT;
9985
9986         if (y < 0) {
9987                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9988                 y = -y;
9989         }
9990         pos |= y << CURSOR_Y_SHIFT;
9991
9992         return pos;
9993 }
9994
9995 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9996 {
9997         const struct drm_mode_config *config =
9998                 &plane_state->base.plane->dev->mode_config;
9999         int width = plane_state->base.crtc_w;
10000         int height = plane_state->base.crtc_h;
10001
10002         return width > 0 && width <= config->cursor_width &&
10003                 height > 0 && height <= config->cursor_height;
10004 }
10005
/*
 * Validate and finalize the cursor surface: fill in the fb view and
 * stride, then compute the aligned plane offset. Cursors cannot be
 * panned within the fb, so any residual src x/y after alignment is
 * rejected with -EINVAL.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* src coordinates are in 16.16 fixed point */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Leftover x/y would require panning, which cursors don't support. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
10037
/*
 * Common cursor checks shared by the i845 and i9xx paths: linear fb
 * only, no scaling allowed, then src coordinate and surface checks
 * for visible planes. Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/* Cursors cannot be scaled. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Nothing further to validate when the plane ends up invisible. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
10070
/* 845g/865g cursor stride is limited to 2048 bytes. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
10078
/* CRTC-dependent cursor control bits for 845g/865g: gamma only. */
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	return CURSOR_GAMMA_ENABLE;
}
10083
/* Plane-state-dependent cursor control bits for 845g/865g. */
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	return CURSOR_ENABLE |
		CURSOR_FORMAT_ARGB |
		CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
10091
10092 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10093 {
10094         int width = plane_state->base.crtc_w;
10095
10096         /*
10097          * 845g/865g are only limited by the width of their cursors,
10098          * the height is arbitrary up to the precision of the register.
10099          */
10100         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10101 }
10102
/*
 * Validate the cursor plane state for 845g/865g: run the common
 * cursor checks, then enforce the platform's size and stride
 * restrictions, and finally precompute the cursor control value.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Only these power-of-two strides are accepted by the hardware. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10144
/*
 * Program the 845g/865g cursor registers from @plane_state, or
 * disable the cursor when @plane_state is NULL / invisible. All
 * register writes happen under the uncore lock using the _FW
 * accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram and re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache what was written so the next update can skip it. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10189
/* Disable the 845g/865g cursor by updating it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
10195
/*
 * Read back whether the 845g/865g cursor is enabled in hardware.
 * Returns false without touching *pipe if the pipe's power domain is
 * off; otherwise reports the CURSOR_ENABLE bit and sets *pipe to
 * PIPE_A.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10217
/* Max cursor stride: 4 bytes per pixel times the max cursor width. */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
10225
/*
 * CRTC-dependent cursor control bits for the i9xx path. Gen11+
 * returns 0 since none of these bits are set there.
 */
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 cntl = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		return cntl;

	cntl |= MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= MCURSOR_PIPE_CSC_ENABLE;

	/* Pre-gen5 non-g4x parts select the pipe via the cursor register. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	return cntl;
}
10245
/*
 * Plane-state-dependent cursor control bits: cursor mode selected by
 * width, trickle feed disable on gen6/ivb, and 180 degree rotation.
 * Returns 0 for unsupported widths (with a MISSING_CASE warning).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	u32 cntl = 0;

	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	/* The cursor mode is keyed off the cursor width. */
	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
10276
10277 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10278 {
10279         struct drm_i915_private *dev_priv =
10280                 to_i915(plane_state->base.plane->dev);
10281         int width = plane_state->base.crtc_w;
10282         int height = plane_state->base.crtc_h;
10283
10284         if (!intel_cursor_size_ok(plane_state))
10285                 return false;
10286
10287         /* Cursor width is limited to a few power-of-two sizes */
10288         switch (width) {
10289         case 256:
10290         case 128:
10291         case 64:
10292                 break;
10293         default:
10294                 return false;
10295         }
10296
10297         /*
10298          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10299          * height from 8 lines up to the cursor width, when the
10300          * cursor is not rotated. Everything else requires square
10301          * cursors.
10302          */
10303         if (HAS_CUR_FBC(dev_priv) &&
10304             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10305                 if (height < 8 || height > width)
10306                         return false;
10307         } else {
10308                 if (height != width)
10309                         return false;
10310         }
10311
10312         return true;
10313 }
10314
/*
 * Validate the cursor plane state for the i9xx path: common checks,
 * platform size/stride constraints, the CHV pipe C left-edge
 * workaround, then precompute the cursor control value.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must match the cursor width exactly. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10369
/*
 * Program the i9xx cursor registers from @plane_state, or disable the
 * cursor when @plane_state is NULL / invisible. The register write
 * ordering below is deliberate; see the comment in the body.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache what was written so the next update can skip it. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10434
/* Disable the i9xx cursor by updating it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
10440
/*
 * Read back whether the i9xx cursor is enabled in hardware and which
 * pipe it is attached to. Returns false without touching *pipe if
 * the pipe's power domain is off.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	/* On older parts the pipe binding is encoded in the register. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10474
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10480
10481 struct drm_framebuffer *
10482 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10483                          struct drm_mode_fb_cmd2 *mode_cmd)
10484 {
10485         struct intel_framebuffer *intel_fb;
10486         int ret;
10487
10488         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10489         if (!intel_fb)
10490                 return ERR_PTR(-ENOMEM);
10491
10492         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10493         if (ret)
10494                 goto err;
10495
10496         return &intel_fb->base;
10497
10498 err:
10499         kfree(intel_fb);
10500         return ERR_PTR(ret);
10501 }
10502
/*
 * Add all of @crtc's planes to @state and detach them from the crtc
 * (clearing both the crtc link and the fb), so a subsequent commit
 * disables them. Returns 0 or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
10527
/*
 * Light up a pipe on @connector so load-based output detection can
 * run. On success the state needed to undo the modeset is stashed in
 * @old->restore_state for intel_release_load_detect_pipe().
 *
 * Returns true when a pipe was set up, false when none could be,
 * or -EDEADLK when the caller must back off and retry with @ctx.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* Build both the detection state and the state to restore later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10685
/*
 * Undo the modeset performed by intel_get_load_detect_pipe() by
 * committing the saved restore state. A no-op if no restore state
 * was stashed in @old.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
10708
10709 static int i9xx_pll_refclk(struct drm_device *dev,
10710                            const struct intel_crtc_state *pipe_config)
10711 {
10712         struct drm_i915_private *dev_priv = to_i915(dev);
10713         u32 dpll = pipe_config->dpll_hw_state.dpll;
10714
10715         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10716                 return dev_priv->vbt.lvds_ssc_freq;
10717         else if (HAS_PCH_SPLIT(dev_priv))
10718                 return 120000;
10719         else if (!IS_GEN(dev_priv, 2))
10720                 return 96000;
10721         else
10722                 return 48000;
10723 }
10724
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick the FP divisor register the DPLL is currently using. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Extract M1/N/M2; Pineview encodes N and M2 differently. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* N is stored one-hot on Pineview, hence ffs() - 1. */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                /* gen3+: P1 is a one-hot bitfield, so ffs() recovers the divisor. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        /* Unknown mode: leave port_clock untouched and bail. */
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /*
                 * gen2: i830 has no LVDS register; otherwise LVDS is
                 * assumed to drive pipe B when the port is enabled.
                 */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
10814
/*
 * Compute the pixel dot clock (in kHz) from the link frequency and the
 * programmed link M/N values. Returns 0 when link_n is zero (i.e. no
 * valid M/N configuration to derive a clock from).
 */
int intel_dotclock_calculate(int link_freq,
                             const struct intel_link_m_n *m_n)
{
        /*
         * The calculation for the data clock is:
         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
         * But we want to avoid losing precison if possible, so:
         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
         *
         * and the link clock is simpler:
         * link_clock = (m * link_clock) / n
         */

        if (!m_n->link_n)
                return 0;

        /* 64-bit multiply to avoid overflow before the divide. */
        return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
10833
/*
 * Read out the pipe's clock configuration on PCH (Ironlake-style)
 * platforms: port_clock comes from the DPLL, and the dotclock is
 * derived from the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
10851
10852 /* Returns the currently programmed mode of the given encoder. */
10853 struct drm_display_mode *
10854 intel_encoder_current_mode(struct intel_encoder *encoder)
10855 {
10856         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10857         struct intel_crtc_state *crtc_state;
10858         struct drm_display_mode *mode;
10859         struct intel_crtc *crtc;
10860         enum pipe pipe;
10861
10862         if (!encoder->get_hw_state(encoder, &pipe))
10863                 return NULL;
10864
10865         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10866
10867         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10868         if (!mode)
10869                 return NULL;
10870
10871         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10872         if (!crtc_state) {
10873                 kfree(mode);
10874                 return NULL;
10875         }
10876
10877         crtc_state->base.crtc = &crtc->base;
10878
10879         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10880                 kfree(crtc_state);
10881                 kfree(mode);
10882                 return NULL;
10883         }
10884
10885         encoder->get_config(encoder, crtc_state);
10886
10887         intel_mode_from_pipe_config(mode, crtc_state);
10888
10889         kfree(crtc_state);
10890
10891         return mode;
10892 }
10893
/* drm_crtc_funcs .destroy hook: tear down the CRTC and free its wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(intel_crtc);
}
10901
10902 /**
10903  * intel_wm_need_update - Check whether watermarks need updating
10904  * @cur: current plane state
10905  * @new: new plane state
10906  *
10907  * Check current plane state versus the new one to determine whether
10908  * watermarks need to be recalculated.
10909  *
10910  * Returns true or false.
10911  */
10912 static bool intel_wm_need_update(struct intel_plane_state *cur,
10913                                  struct intel_plane_state *new)
10914 {
10915         /* Update watermarks on tiling or size changes. */
10916         if (new->base.visible != cur->base.visible)
10917                 return true;
10918
10919         if (!cur->base.fb || !new->base.fb)
10920                 return false;
10921
10922         if (cur->base.fb->modifier != new->base.fb->modifier ||
10923             cur->base.rotation != new->base.rotation ||
10924             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10925             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10926             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10927             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10928                 return true;
10929
10930         return false;
10931 }
10932
10933 static bool needs_scaling(const struct intel_plane_state *state)
10934 {
10935         int src_w = drm_rect_width(&state->base.src) >> 16;
10936         int src_h = drm_rect_height(&state->base.src) >> 16;
10937         int dst_w = drm_rect_width(&state->base.dst);
10938         int dst_h = drm_rect_height(&state->base.dst);
10939
10940         return (src_w != dst_w || src_h != dst_h);
10941 }
10942
/*
 * Work out, for one plane in an atomic update, which derived crtc_state
 * flags need to be set: watermark recomputation, cxsr/LP-watermark
 * disabling, frontbuffer bits and fb_changed. Returns 0 on success or a
 * negative error code (from scaler setup).
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct drm_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct drm_plane_state *plane_state)
{
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->plane);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->base.active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;

        /* gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->base.visible;
        visible = plane_state->visible;

        /* A plane cannot have been visible on a disabled crtc. */
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->visible = visible = false;
                to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
        }

        /* Invisible before and after: nothing to recompute. */
        if (!was_visible && !visible)
                return 0;

        if (fb != old_plane_state->base.fb)
                pipe_config->fb_changed = true;

        /* A modeset forces a full off/on cycle for visible planes. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
                         intel_crtc->base.base.id, intel_crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         fb ? fb->base.id : -1);

        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                /* pre-gen5 (except g4x) recompute watermarks before enabling. */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
                /* ...and after disabling. */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
                                        to_intel_plane_state(plane_state))) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
                        pipe_config->update_wm_post = true;
                }
        }

        /* Frontbuffer tracking needs to know this plane is touched. */
        if (visible || was_visible)
                pipe_config->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(to_intel_plane_state(plane_state)))))
                pipe_config->disable_lp_wm = true;

        return 0;
}
11077
11078 static bool encoders_cloneable(const struct intel_encoder *a,
11079                                const struct intel_encoder *b)
11080 {
11081         /* masks could be asymmetric, so check both ways */
11082         return a == b || (a->cloneable & (1 << b->type) &&
11083                           b->cloneable & (1 << a->type));
11084 }
11085
/*
 * Check that @encoder can be cloned with every other encoder that the
 * atomic @state routes to @crtc. Returns false as soon as one
 * incompatible encoder is found.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
                                         struct intel_crtc *crtc,
                                         struct intel_encoder *encoder)
{
        struct intel_encoder *source_encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int i;

        for_each_new_connector_in_state(state, connector, connector_state, i) {
                /* Only consider connectors being assigned to this crtc. */
                if (connector_state->crtc != &crtc->base)
                        continue;

                source_encoder =
                        to_intel_encoder(connector_state->best_encoder);
                if (!encoders_cloneable(encoder, source_encoder))
                        return false;
        }

        return true;
}
11107
/*
 * For every plane in the atomic state that has a linked (Y/UV slave)
 * plane, pull the linked plane's state into the atomic state as well so
 * both are updated together. Returns 0 or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state, *linked_plane_state;
        int i;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                linked = plane_state->linked_plane;

                if (!linked)
                        continue;

                linked_plane_state = intel_atomic_get_plane_state(state, linked);
                if (IS_ERR(linked_plane_state))
                        return PTR_ERR(linked_plane_state);

                /* Links must be mutual, and exactly one side is the slave. */
                WARN_ON(linked_plane_state->linked_plane != plane);
                WARN_ON(linked_plane_state->slave == plane_state->slave);
        }

        return 0;
}
11130
/*
 * On gen11+, planar (NV12-style) formats need a second "Y" plane slaved
 * to the UV master plane. Tear down all stale master/slave links for
 * this crtc, then pair every plane that needs planar YUV with a free Y
 * plane. Returns 0, or -EINVAL when not enough Y planes are available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        /* Plane linking only exists on gen11+. */
        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
                        continue;

                plane_state->linked_plane = NULL;
                if (plane_state->slave && !plane_state->base.visible) {
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->slave = false;
        }

        /* No planar-format planes on this crtc: nothing left to pair up. */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find the first Y-capable plane that is not already in use. */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
                                      hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                /* Wire up the master/slave pair and mark both for update. */
                plane_state->linked_plane = linked;

                linked_state->slave = true;
                linked_state->linked_plane = plane;
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
        }

        return 0;
}
11202
/*
 * drm_crtc_helper_funcs .atomic_check hook: validate and fill in the
 * derived crtc state for an atomic commit (clocks, color management,
 * watermarks, scalers, IPS). Returns 0 on success or a negative error.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
        int ret;
        bool mode_changed = needs_modeset(crtc_state);

        /* pre-gen5 (except g4x): disabling the crtc needs a wm update. */
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->active)
                pipe_config->update_wm_post = true;

        /* Compute clocks on a modeset; a shared DPLL must not be set yet. */
        if (mode_changed && crtc_state->enable &&
            dev_priv->display.crtc_compute_clock &&
            !WARN_ON(pipe_config->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(intel_crtc,
                                                           pipe_config);
                if (ret)
                        return ret;
        }

        if (mode_changed || crtc_state->color_mgmt_changed) {
                ret = intel_color_check(pipe_config);
                if (ret)
                        return ret;

                /*
                 * Changing color management on Intel hardware is
                 * handled as part of planes update.
                 */
                crtc_state->planes_changed = true;
        }

        ret = 0;
        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(pipe_config);
                if (ret) {
                        DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm) {
                /* Intermediate wm requires a pipe wm implementation too. */
                if (WARN_ON(!dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(pipe_config);
                if (ret) {
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        }

        /* gen9+: scaler and NV12 plane-link bookkeeping, short-circuit on error. */
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed || pipe_config->update_pipe)
                        ret = skl_update_scaler_crtc(pipe_config);

                if (!ret)
                        ret = icl_check_nv12_planes(pipe_config);
                if (!ret)
                        ret = skl_check_pipe_max_pixel_rate(intel_crtc,
                                                            pipe_config);
                if (!ret)
                        ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
                                                         pipe_config);
        }

        if (HAS_IPS(dev_priv))
                pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

        return ret;
}
11282
/* CRTC helper vtable: only the atomic check step is implemented here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
        .atomic_check = intel_crtc_atomic_check,
};
11286
/*
 * Synchronize each connector's atomic state (best_encoder/crtc) with the
 * legacy encoder pointers, adjusting connector references so that each
 * bound connector holds exactly one reference for its crtc assignment.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                /* Drop the reference held for the previous crtc binding. */
                if (connector->base.state->crtc)
                        drm_connector_put(&connector->base);

                if (connector->base.encoder) {
                        connector->base.state->best_encoder =
                                connector->base.encoder;
                        connector->base.state->crtc =
                                connector->base.encoder->crtc;

                        /* Take a reference for the new crtc binding. */
                        drm_connector_get(&connector->base);
                } else {
                        connector->base.state->best_encoder = NULL;
                        connector->base.state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
11311
11312 static int
11313 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11314                       struct intel_crtc_state *pipe_config)
11315 {
11316         struct drm_connector *connector = conn_state->connector;
11317         const struct drm_display_info *info = &connector->display_info;
11318         int bpp;
11319
11320         switch (conn_state->max_bpc) {
11321         case 6 ... 7:
11322                 bpp = 6 * 3;
11323                 break;
11324         case 8 ... 9:
11325                 bpp = 8 * 3;
11326                 break;
11327         case 10 ... 11:
11328                 bpp = 10 * 3;
11329                 break;
11330         case 12:
11331                 bpp = 12 * 3;
11332                 break;
11333         default:
11334                 return -EINVAL;
11335         }
11336
11337         if (bpp < pipe_config->pipe_bpp) {
11338                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11339                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11340                               connector->base.id, connector->name,
11341                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11342                               pipe_config->pipe_bpp);
11343
11344                 pipe_config->pipe_bpp = bpp;
11345         }
11346
11347         return 0;
11348 }
11349
11350 static int
11351 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11352                           struct intel_crtc_state *pipe_config)
11353 {
11354         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11355         struct drm_atomic_state *state = pipe_config->base.state;
11356         struct drm_connector *connector;
11357         struct drm_connector_state *connector_state;
11358         int bpp, i;
11359
11360         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11361             IS_CHERRYVIEW(dev_priv)))
11362                 bpp = 10*3;
11363         else if (INTEL_GEN(dev_priv) >= 5)
11364                 bpp = 12*3;
11365         else
11366                 bpp = 8*3;
11367
11368         pipe_config->pipe_bpp = bpp;
11369
11370         /* Clamp display bpp to connector max bpp */
11371         for_each_new_connector_in_state(state, connector, connector_state, i) {
11372                 int ret;
11373
11374                 if (connector_state->crtc != &crtc->base)
11375                         continue;
11376
11377                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11378                 if (ret)
11379                         return ret;
11380         }
11381
11382         return 0;
11383 }
11384
/* Dump the raw crtc_* timing fields of a mode for debugging. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
        DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
                        "type: 0x%x flags: 0x%x\n",
                mode->crtc_clock,
                mode->crtc_hdisplay, mode->crtc_hsync_start,
                mode->crtc_hsync_end, mode->crtc_htotal,
                mode->crtc_vdisplay, mode->crtc_vsync_start,
                mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
11395
/* Dump a link M/N configuration, tagged with @id (e.g. "fdi", "dp m_n"). */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
                      unsigned int lane_count, struct intel_link_m_n *m_n)
{
        DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                      id, lane_count,
                      m_n->gmch_m, m_n->gmch_n,
                      m_n->link_m, m_n->link_n, m_n->tu);
}
11405
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11424
11425 static void snprintf_output_types(char *buf, size_t len,
11426                                   unsigned int output_types)
11427 {
11428         char *str = buf;
11429         int i;
11430
11431         str[0] = '\0';
11432
11433         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11434                 int r;
11435
11436                 if ((output_types & BIT(i)) == 0)
11437                         continue;
11438
11439                 r = snprintf(str, len, "%s%s",
11440                              str != buf ? "," : "", output_type_str[i]);
11441                 if (r >= len)
11442                         break;
11443                 str += r;
11444                 len -= r;
11445
11446                 output_types &= ~BIT(i);
11447         }
11448
11449         WARN_ON_ONCE(output_types != 0);
11450 }
11451
/* Human-readable names for enum intel_output_format, for debug output. */
static const char * const output_format_str[] = {
        [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
        [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
        [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
        [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11458
11459 static const char *output_formats(enum intel_output_format format)
11460 {
11461         if (format >= ARRAY_SIZE(output_format_str))
11462                 format = INTEL_OUTPUT_FORMAT_INVALID;
11463         return output_format_str[format];
11464 }
11465
/*
 * Dump the contents of a crtc state to the kernel log (KMS debug level).
 *
 * @crtc: the crtc the state belongs to
 * @pipe_config: the software crtc state to dump
 * @context: free-form string appended to the header line, identifying
 *           why/where the dump was requested
 *
 * Pure diagnostics: reads the state and the planes attached to this
 * crtc's pipe, writes everything via DRM_DEBUG_KMS(), changes nothing.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	/* output_types is a bitmask; render it as text first. */
	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("output format: %s\n",
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	/* FDI M/N values only exist for PCH-attached encoders. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* second M/N set is only valid when DRRS is in use */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	/* pipe scalers exist on gen9+ only */
	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the pipe-internal panel fitter, PCH ones the PCH pfit */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	/* walk every plane in the device, keep only the ones on our pipe */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		/* no fb attached means the plane is disabled */
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			/* src rect is in 16.16 fixed point, hence the >> 16 */
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11572
/*
 * Verify that no digital port is claimed by more than one encoder, and
 * that MST and SST/HDMI usage is not mixed on the same port.
 *
 * @state: the atomic state being checked
 *
 * Returns true if the configuration is conflict-free, false otherwise.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;      /* bitmask of ports used by SST/HDMI */
	unsigned int used_mst_ports = 0;  /* bitmask of ports used by DP MST */
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* fall back to the current state if this connector is not in @state */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		/* a connector with an encoder should always have a crtc */
		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			/* MST ports are tracked separately; dup use is legal here */
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11636
/*
 * Reset the i915-specific part of a crtc state to zero while keeping the
 * drm core state (crtc_state->base) and a whitelist of fields intact.
 *
 * @crtc_state: state to clear in place
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be allocated.
 */
static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_state *saved_state;

	/* scratch copy used to stage the fields we want to survive the wipe */
	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
	saved_state->ips_force_disable = crtc_state->ips_force_disable;
	/* these platforms compute watermarks up front; keep them too */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	/* the BUILD_BUG_ON ensures base is the first member, so "&base + 1"
	 * is the start of the i915-only extension of the struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	kfree(saved_state);
	return 0;
}
11670
/*
 * Compute the full software pipe configuration for a crtc: baseline bpp,
 * pipe source size, output types, then let every encoder on the crtc
 * adjust the config, retrying once if the crtc fixup asks for it.
 *
 * @crtc: crtc being configured
 * @pipe_config: state to fill; pipe_config->base.state is the atomic state
 *
 * Returns 0 on success, a negative error code (including -EDEADLK for
 * atomic lock contention) on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;  /* allow exactly one bandwidth-constrained retry */

	/* start from a clean state, preserving only whitelisted fields */
	ret = clear_intel_crtc_state(pipe_config);
	if (ret)
		return ret;

	/* default 1:1 pipe -> transcoder mapping; encoders may override */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* remember the hw maximum for the debug message at the end */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal lock contention, don't log it */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* RETRY (a positive value) asks for one more pass with reduced bpp */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
11807
/*
 * Compare two clock values and treat them as matching when they are
 * within roughly 5% of each other.  A zero clock only matches another
 * zero clock (caught by the equality fast path).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	/* identical values always match, including 0 == 0 */
	if (clock1 == clock2)
		return true;

	/* a single zero clock can never fuzzily match a non-zero one */
	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* accept when 2*delta/(clock1+clock2) stays below ~5% */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
11825
11826 static bool
11827 intel_compare_m_n(unsigned int m, unsigned int n,
11828                   unsigned int m2, unsigned int n2,
11829                   bool exact)
11830 {
11831         if (m == m2 && n == n2)
11832                 return true;
11833
11834         if (exact || !m || !n || !m2 || !n2)
11835                 return false;
11836
11837         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11838
11839         if (n > n2) {
11840                 while (n > n2) {
11841                         m2 <<= 1;
11842                         n2 <<= 1;
11843                 }
11844         } else if (n < n2) {
11845                 while (n < n2) {
11846                         m <<= 1;
11847                         n <<= 1;
11848                 }
11849         }
11850
11851         if (n != n2)
11852                 return false;
11853
11854         return intel_fuzzy_clock_check(m, m2);
11855 }
11856
11857 static bool
11858 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11859                        struct intel_link_m_n *m2_n2,
11860                        bool adjust)
11861 {
11862         if (m_n->tu == m2_n2->tu &&
11863             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11864                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11865             intel_compare_m_n(m_n->link_m, m_n->link_n,
11866                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11867                 if (adjust)
11868                         *m2_n2 = *m_n;
11869
11870                 return true;
11871         }
11872
11873         return false;
11874 }
11875
11876 static void __printf(3, 4)
11877 pipe_config_err(bool adjust, const char *name, const char *format, ...)
11878 {
11879         struct va_format vaf;
11880         va_list args;
11881
11882         va_start(args, format);
11883         vaf.fmt = format;
11884         vaf.va = &args;
11885
11886         if (adjust)
11887                 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11888         else
11889                 drm_err("mismatch in %s %pV", name, &vaf);
11890
11891         va_end(args);
11892 }
11893
11894 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
11895 {
11896         if (i915_modparams.fastboot != -1)
11897                 return i915_modparams.fastboot;
11898
11899         /* Enable fastboot by default on Skylake and newer */
11900         if (INTEL_GEN(dev_priv) >= 9)
11901                 return true;
11902
11903         /* Enable fastboot by default on VLV and CHV */
11904         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11905                 return true;
11906
11907         /* Disabled by default on all others */
11908         return false;
11909 }
11910
11911 static bool
11912 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11913                           struct intel_crtc_state *current_config,
11914                           struct intel_crtc_state *pipe_config,
11915                           bool adjust)
11916 {
11917         bool ret = true;
11918         bool fixup_inherited = adjust &&
11919                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11920                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11921
11922         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
11923                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
11924                 ret = false;
11925         }
11926
11927 #define PIPE_CONF_CHECK_X(name) do { \
11928         if (current_config->name != pipe_config->name) { \
11929                 pipe_config_err(adjust, __stringify(name), \
11930                           "(expected 0x%08x, found 0x%08x)\n", \
11931                           current_config->name, \
11932                           pipe_config->name); \
11933                 ret = false; \
11934         } \
11935 } while (0)
11936
11937 #define PIPE_CONF_CHECK_I(name) do { \
11938         if (current_config->name != pipe_config->name) { \
11939                 pipe_config_err(adjust, __stringify(name), \
11940                           "(expected %i, found %i)\n", \
11941                           current_config->name, \
11942                           pipe_config->name); \
11943                 ret = false; \
11944         } \
11945 } while (0)
11946
11947 #define PIPE_CONF_CHECK_BOOL(name) do { \
11948         if (current_config->name != pipe_config->name) { \
11949                 pipe_config_err(adjust, __stringify(name), \
11950                           "(expected %s, found %s)\n", \
11951                           yesno(current_config->name), \
11952                           yesno(pipe_config->name)); \
11953                 ret = false; \
11954         } \
11955 } while (0)
11956
11957 /*
11958  * Checks state where we only read out the enabling, but not the entire
11959  * state itself (like full infoframes or ELD for audio). These states
11960  * require a full modeset on bootup to fix up.
11961  */
11962 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11963         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11964                 PIPE_CONF_CHECK_BOOL(name); \
11965         } else { \
11966                 pipe_config_err(adjust, __stringify(name), \
11967                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11968                           yesno(current_config->name), \
11969                           yesno(pipe_config->name)); \
11970                 ret = false; \
11971         } \
11972 } while (0)
11973
11974 #define PIPE_CONF_CHECK_P(name) do { \
11975         if (current_config->name != pipe_config->name) { \
11976                 pipe_config_err(adjust, __stringify(name), \
11977                           "(expected %p, found %p)\n", \
11978                           current_config->name, \
11979                           pipe_config->name); \
11980                 ret = false; \
11981         } \
11982 } while (0)
11983
11984 #define PIPE_CONF_CHECK_M_N(name) do { \
11985         if (!intel_compare_link_m_n(&current_config->name, \
11986                                     &pipe_config->name,\
11987                                     adjust)) { \
11988                 pipe_config_err(adjust, __stringify(name), \
11989                           "(expected tu %i gmch %i/%i link %i/%i, " \
11990                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11991                           current_config->name.tu, \
11992                           current_config->name.gmch_m, \
11993                           current_config->name.gmch_n, \
11994                           current_config->name.link_m, \
11995                           current_config->name.link_n, \
11996                           pipe_config->name.tu, \
11997                           pipe_config->name.gmch_m, \
11998                           pipe_config->name.gmch_n, \
11999                           pipe_config->name.link_m, \
12000                           pipe_config->name.link_n); \
12001                 ret = false; \
12002         } \
12003 } while (0)
12004
12005 /* This is required for BDW+ where there is only one set of registers for
12006  * switching between high and low RR.
12007  * This macro can be used whenever a comparison has to be made between one
12008  * hw state and multiple sw state variables.
12009  */
12010 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12011         if (!intel_compare_link_m_n(&current_config->name, \
12012                                     &pipe_config->name, adjust) && \
12013             !intel_compare_link_m_n(&current_config->alt_name, \
12014                                     &pipe_config->name, adjust)) { \
12015                 pipe_config_err(adjust, __stringify(name), \
12016                           "(expected tu %i gmch %i/%i link %i/%i, " \
12017                           "or tu %i gmch %i/%i link %i/%i, " \
12018                           "found tu %i, gmch %i/%i link %i/%i)\n", \
12019                           current_config->name.tu, \
12020                           current_config->name.gmch_m, \
12021                           current_config->name.gmch_n, \
12022                           current_config->name.link_m, \
12023                           current_config->name.link_n, \
12024                           current_config->alt_name.tu, \
12025                           current_config->alt_name.gmch_m, \
12026                           current_config->alt_name.gmch_n, \
12027                           current_config->alt_name.link_m, \
12028                           current_config->alt_name.link_n, \
12029                           pipe_config->name.tu, \
12030                           pipe_config->name.gmch_m, \
12031                           pipe_config->name.gmch_n, \
12032                           pipe_config->name.link_m, \
12033                           pipe_config->name.link_n); \
12034                 ret = false; \
12035         } \
12036 } while (0)
12037
12038 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12039         if ((current_config->name ^ pipe_config->name) & (mask)) { \
12040                 pipe_config_err(adjust, __stringify(name), \
12041                           "(%x) (expected %i, found %i)\n", \
12042                           (mask), \
12043                           current_config->name & (mask), \
12044                           pipe_config->name & (mask)); \
12045                 ret = false; \
12046         } \
12047 } while (0)
12048
12049 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12050         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12051                 pipe_config_err(adjust, __stringify(name), \
12052                           "(expected %i, found %i)\n", \
12053                           current_config->name, \
12054                           pipe_config->name); \
12055                 ret = false; \
12056         } \
12057 } while (0)
12058
12059 #define PIPE_CONF_QUIRK(quirk)  \
12060         ((current_config->quirks | pipe_config->quirks) & (quirk))
12061
12062         PIPE_CONF_CHECK_I(cpu_transcoder);
12063
12064         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12065         PIPE_CONF_CHECK_I(fdi_lanes);
12066         PIPE_CONF_CHECK_M_N(fdi_m_n);
12067
12068         PIPE_CONF_CHECK_I(lane_count);
12069         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12070
12071         if (INTEL_GEN(dev_priv) < 8) {
12072                 PIPE_CONF_CHECK_M_N(dp_m_n);
12073
12074                 if (current_config->has_drrs)
12075                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
12076         } else
12077                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12078
12079         PIPE_CONF_CHECK_X(output_types);
12080
12081         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12082         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12083         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12084         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12085         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12086         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12087
12088         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12089         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12090         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12091         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12092         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12093         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12094
12095         PIPE_CONF_CHECK_I(pixel_multiplier);
12096         PIPE_CONF_CHECK_I(output_format);
12097         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12098         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12099             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12100                 PIPE_CONF_CHECK_BOOL(limited_color_range);
12101
12102         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12103         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12104         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12105
12106         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12107
12108         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12109                               DRM_MODE_FLAG_INTERLACE);
12110
12111         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12112                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12113                                       DRM_MODE_FLAG_PHSYNC);
12114                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12115                                       DRM_MODE_FLAG_NHSYNC);
12116                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12117                                       DRM_MODE_FLAG_PVSYNC);
12118                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12119                                       DRM_MODE_FLAG_NVSYNC);
12120         }
12121
12122         PIPE_CONF_CHECK_X(gmch_pfit.control);
12123         /* pfit ratios are autocomputed by the hw on gen4+ */
12124         if (INTEL_GEN(dev_priv) < 4)
12125                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12126         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12127
12128         if (!adjust) {
12129                 PIPE_CONF_CHECK_I(pipe_src_w);
12130                 PIPE_CONF_CHECK_I(pipe_src_h);
12131
12132                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12133                 if (current_config->pch_pfit.enabled) {
12134                         PIPE_CONF_CHECK_X(pch_pfit.pos);
12135                         PIPE_CONF_CHECK_X(pch_pfit.size);
12136                 }
12137
12138                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12139                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12140         }
12141
12142         PIPE_CONF_CHECK_BOOL(double_wide);
12143
12144         PIPE_CONF_CHECK_P(shared_dpll);
12145         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12146         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12147         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12148         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12149         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12150         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12151         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12152         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12153         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12154         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12155         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12156         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12157         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12158         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12159         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12160         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12161         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12162         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12163         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12164         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12165         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12166         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12167         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12168         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12169         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12170         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12171         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12172         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12173         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12174         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12175         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12176
12177         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12178         PIPE_CONF_CHECK_X(dsi_pll.div);
12179
12180         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12181                 PIPE_CONF_CHECK_I(pipe_bpp);
12182
12183         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12184         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12185
12186         PIPE_CONF_CHECK_I(min_voltage_level);
12187
12188 #undef PIPE_CONF_CHECK_X
12189 #undef PIPE_CONF_CHECK_I
12190 #undef PIPE_CONF_CHECK_BOOL
12191 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12192 #undef PIPE_CONF_CHECK_P
12193 #undef PIPE_CONF_CHECK_FLAGS
12194 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12195 #undef PIPE_CONF_QUIRK
12196
12197         return ret;
12198 }
12199
12200 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12201                                            const struct intel_crtc_state *pipe_config)
12202 {
12203         if (pipe_config->has_pch_encoder) {
12204                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12205                                                             &pipe_config->fdi_m_n);
12206                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12207
12208                 /*
12209                  * FDI already provided one idea for the dotclock.
12210                  * Yell if the encoder disagrees.
12211                  */
12212                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12213                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12214                      fdi_dotclock, dotclock);
12215         }
12216 }
12217
/*
 * Cross-check the software-computed SKL+ watermark and DDB state against
 * what is actually programmed in the hardware, reporting any mismatch via
 * DRM_ERROR.  Only runs on gen9+ when the crtc is active.
 */
static void verify_wm_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *new_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct skl_pipe_wm hw_wm, *sw_wm;
        struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
        struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const enum pipe pipe = intel_crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* Watermark verification only applies to active gen9+ pipes. */
        if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
                return;

        /* Read back the hw watermarks; sw side comes from the new crtc state. */
        skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
        sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);

        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* Gen11+ has multiple DBUF slices; make sure enablement matches. */
        if (INTEL_GEN(dev_priv) >= 11)
                if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
                        DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
                                  sw_ddb->enabled_slices,
                                  hw_ddb.enabled_slices);
        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                hw_plane_wm = &hw_wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                /* Transition watermark gets checked separately from the levels. */
                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb_y[plane];
                sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated it's ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        /* NOTE(review): 'if (1)' looks vestigial (presumably kept so the
         * indentation/diff stays aligned with the comment above) — confirm
         * before removing. */
        if (1) {
                hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }
}
12343
12344 static void
12345 verify_connector_state(struct drm_device *dev,
12346                        struct drm_atomic_state *state,
12347                        struct drm_crtc *crtc)
12348 {
12349         struct drm_connector *connector;
12350         struct drm_connector_state *new_conn_state;
12351         int i;
12352
12353         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12354                 struct drm_encoder *encoder = connector->encoder;
12355                 struct drm_crtc_state *crtc_state = NULL;
12356
12357                 if (new_conn_state->crtc != crtc)
12358                         continue;
12359
12360                 if (crtc)
12361                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12362
12363                 intel_connector_verify_state(crtc_state, new_conn_state);
12364
12365                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12366                      "connector's atomic encoder doesn't match legacy encoder\n");
12367         }
12368 }
12369
/*
 * For every encoder, check that the connectors' best_encoder assignments
 * are consistent with the encoder's crtc link, and that an encoder with no
 * crtc is really disabled in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(dev, encoder) {
                /* found: some connector referenced this encoder (old or new state).
                 * enabled: a connector uses it in the new state. */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(state, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoders untouched by this commit aren't verified. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /* A detached encoder must also be off in hardware. */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
12418
/*
 * Read the pipe configuration back from the hardware and compare it
 * against the software state computed for this commit.
 *
 * NOTE: the storage of @old_crtc_state is destroyed and reused to hold
 * the hardware readout, so the old state must not be used afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *old_crtc_state,
                  struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
        struct drm_atomic_state *old_state;
        bool active;

        /* Recycle the old crtc state's memory for the hw readout. */
        old_state = old_crtc_state->state;
        __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
        pipe_config = to_intel_crtc_state(old_crtc_state);
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = crtc;
        pipe_config->base.state = old_state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->active;

        I915_STATE_WARN(new_crtc_state->active != active,
             "crtc active state doesn't match with hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, active);

        I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
             "transitional active state does not match atomic hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

        /* Every encoder on the crtc must agree on active state and pipe. */
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->active,
                        "[ENCODER:%i] active %i with crtc active %i\n",
                        encoder->base.base.id, active, new_crtc_state->active);

                I915_STATE_WARN(active && intel_crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let each active encoder fill in its part of the readout. */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* Nothing further to compare for an inactive crtc. */
        if (!new_crtc_state->active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        sw_config = to_intel_crtc_state(new_crtc_state);
        if (!intel_pipe_config_compare(dev_priv, sw_config,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(intel_crtc, pipe_config,
                                       "[hw state]");
                intel_dump_pipe_config(intel_crtc, sw_config,
                                       "[sw state]");
        }
}
12488
12489 static void
12490 intel_verify_planes(struct intel_atomic_state *state)
12491 {
12492         struct intel_plane *plane;
12493         const struct intel_plane_state *plane_state;
12494         int i;
12495
12496         for_each_new_intel_plane_in_state(state, plane,
12497                                           plane_state, i)
12498                 assert_plane(plane, plane_state->base.visible);
12499 }
12500
/*
 * Verify the software tracking of one shared DPLL against its hardware
 * state.  With @crtc == NULL only the global pll bookkeeping is checked;
 * otherwise @crtc's membership in the pll's active/enabled crtc masks is
 * verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct drm_crtc *crtc,
                         struct drm_crtc_state *new_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        DRM_DEBUG_KMS("%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls never power down, so skip the on/off checks. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        /* No crtc: only cross-check the global mask consistency. */
        if (!crtc) {
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(crtc);

        /* The crtc must be in the active mask iff its new state is active. */
        if (new_state->active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);

        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* An enabled pll's tracked register state must match the hardware. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
12555
12556 static void
12557 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12558                          struct drm_crtc_state *old_crtc_state,
12559                          struct drm_crtc_state *new_crtc_state)
12560 {
12561         struct drm_i915_private *dev_priv = to_i915(dev);
12562         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12563         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12564
12565         if (new_state->shared_dpll)
12566                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12567
12568         if (old_state->shared_dpll &&
12569             old_state->shared_dpll != new_state->shared_dpll) {
12570                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12571                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12572
12573                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12574                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12575                                 pipe_name(drm_crtc_index(crtc)));
12576                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12577                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12578                                 pipe_name(drm_crtc_index(crtc)));
12579         }
12580 }
12581
12582 static void
12583 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12584                           struct drm_atomic_state *state,
12585                           struct drm_crtc_state *old_state,
12586                           struct drm_crtc_state *new_state)
12587 {
12588         if (!needs_modeset(new_state) &&
12589             !to_intel_crtc_state(new_state)->update_pipe)
12590                 return;
12591
12592         verify_wm_state(crtc, new_state);
12593         verify_connector_state(crtc->dev, state, crtc);
12594         verify_crtc_state(crtc, old_state, new_state);
12595         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12596 }
12597
12598 static void
12599 verify_disabled_dpll_state(struct drm_device *dev)
12600 {
12601         struct drm_i915_private *dev_priv = to_i915(dev);
12602         int i;
12603
12604         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12605                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12606 }
12607
/*
 * Verify the parts of the modeset state not tied to a specific crtc:
 * encoder routing, connectors with no crtc, and pll bookkeeping for plls
 * not referenced by any crtc.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        verify_encoder_state(dev, state);
        verify_connector_state(dev, state, NULL);
        verify_disabled_dpll_state(dev);
}
12616
12617 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12618 {
12619         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12620         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12621
12622         /*
12623          * The scanline counter increments at the leading edge of hsync.
12624          *
12625          * On most platforms it starts counting from vtotal-1 on the
12626          * first active line. That means the scanline counter value is
12627          * always one less than what we would expect. Ie. just after
12628          * start of vblank, which also occurs at start of hsync (on the
12629          * last active line), the scanline counter will read vblank_start-1.
12630          *
12631          * On gen2 the scanline counter starts counting from 1 instead
12632          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12633          * to keep the value positive), instead of adding one.
12634          *
12635          * On HSW+ the behaviour of the scanline counter depends on the output
12636          * type. For DP ports it behaves like most other platforms, but on HDMI
12637          * there's an extra 1 line difference. So we need to add two instead of
12638          * one to the value.
12639          *
12640          * On VLV/CHV DSI the scanline counter would appear to increment
12641          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12642          * that means we can't tell whether we're in vblank or not while
12643          * we're on that particular line. We must still set scanline_offset
12644          * to 1 so that the vblank timestamps come out correct when we query
12645          * the scanline counter from within the vblank interrupt handler.
12646          * However if queried just before the start of vblank we'll get an
12647          * answer that's slightly in the future.
12648          */
12649         if (IS_GEN(dev_priv, 2)) {
12650                 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12651                 int vtotal;
12652
12653                 vtotal = adjusted_mode->crtc_vtotal;
12654                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12655                         vtotal /= 2;
12656
12657                 crtc->scanline_offset = vtotal - 1;
12658         } else if (HAS_DDI(dev_priv) &&
12659                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12660                 crtc->scanline_offset = 2;
12661         } else
12662                 crtc->scanline_offset = 1;
12663 }
12664
12665 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12666 {
12667         struct drm_device *dev = state->dev;
12668         struct drm_i915_private *dev_priv = to_i915(dev);
12669         struct drm_crtc *crtc;
12670         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12671         int i;
12672
12673         if (!dev_priv->display.crtc_compute_clock)
12674                 return;
12675
12676         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12677                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12678                 struct intel_shared_dpll *old_dpll =
12679                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12680
12681                 if (!needs_modeset(new_crtc_state))
12682                         continue;
12683
12684                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12685
12686                 if (!old_dpll)
12687                         continue;
12688
12689                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12690         }
12691 }
12692
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success, or a negative error code if acquiring a crtc
 * state fails.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct intel_crtc *intel_crtc;
        struct drm_crtc *crtc;
        struct intel_crtc_state *first_crtc_state = NULL;
        struct intel_crtc_state *other_crtc_state = NULL;
        enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
        int i;

        /* look at all crtc's that are going to be enabled in during modeset */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                intel_crtc = to_intel_crtc(crtc);

                if (!crtc_state->active || !needs_modeset(crtc_state))
                        continue;

                /* Remember the first enabling crtc and, if present, one more. */
                if (first_crtc_state) {
                        other_crtc_state = to_intel_crtc_state(crtc_state);
                        break;
                } else {
                        first_crtc_state = to_intel_crtc_state(crtc_state);
                        first_pipe = intel_crtc->pipe;
                }
        }

        /* No workaround needed? */
        if (!first_crtc_state)
                return 0;

        /* w/a possibly needed, check how many crtc's are already enabled. */
        for_each_intel_crtc(state->dev, intel_crtc) {
                struct intel_crtc_state *pipe_config;

                pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
                if (IS_ERR(pipe_config))
                        return PTR_ERR(pipe_config);

                pipe_config->hsw_workaround_pipe = INVALID_PIPE;

                /* Only count crtcs staying enabled through this commit. */
                if (!pipe_config->base.active ||
                    needs_modeset(&pipe_config->base))
                        continue;

                /* 2 or more enabled crtcs means no need for w/a */
                if (enabled_pipe != INVALID_PIPE)
                        return 0;

                enabled_pipe = intel_crtc->pipe;
        }

        /*
         * Exactly one crtc already enabled: the newly enabling crtc must
         * wait on it.  None enabled but two enabling: the second waits on
         * the first.
         */
        if (enabled_pipe != INVALID_PIPE)
                first_crtc_state->hsw_workaround_pipe = enabled_pipe;
        else if (other_crtc_state)
                other_crtc_state->hsw_workaround_pipe = first_pipe;

        return 0;
}
12757
12758 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12759 {
12760         struct drm_crtc *crtc;
12761
12762         /* Add all pipes to the state */
12763         for_each_crtc(state->dev, crtc) {
12764                 struct drm_crtc_state *crtc_state;
12765
12766                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12767                 if (IS_ERR(crtc_state))
12768                         return PTR_ERR(crtc_state);
12769         }
12770
12771         return 0;
12772 }
12773
12774 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12775 {
12776         struct drm_crtc *crtc;
12777
12778         /*
12779          * Add all pipes to the state, and force
12780          * a modeset on all the active ones.
12781          */
12782         for_each_crtc(state->dev, crtc) {
12783                 struct drm_crtc_state *crtc_state;
12784                 int ret;
12785
12786                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12787                 if (IS_ERR(crtc_state))
12788                         return PTR_ERR(crtc_state);
12789
12790                 if (!crtc_state->active || needs_modeset(crtc_state))
12791                         continue;
12792
12793                 crtc_state->mode_changed = true;
12794
12795                 ret = drm_atomic_add_affected_connectors(state, crtc);
12796                 if (ret)
12797                         return ret;
12798
12799                 ret = drm_atomic_add_affected_planes(state, crtc);
12800                 if (ret)
12801                         return ret;
12802         }
12803
12804         return 0;
12805 }
12806
/*
 * Global checks run when at least one crtc needs a modeset: digital port
 * conflict detection, active-crtc bookkeeping and cdclk recomputation.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int ret = 0, i;

        if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return -EINVAL;
        }

        /* Start from the current hw bookkeeping and apply this commit's deltas. */
        intel_state->modeset = true;
        intel_state->active_crtcs = dev_priv->active_crtcs;
        intel_state->cdclk.logical = dev_priv->cdclk.logical;
        intel_state->cdclk.actual = dev_priv->cdclk.actual;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->active)
                        intel_state->active_crtcs |= 1 << i;
                else
                        intel_state->active_crtcs &= ~(1 << i);

                if (old_crtc_state->active != new_crtc_state->active)
                        intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
        }

        /*
         * See if the config requires any additional preparation, e.g.
         * to adjust global state with pipes off.  We need to do this
         * here so we can get the modeset_pipe updated config for the new
         * mode set on this crtc.  For other crtcs we need to use the
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
                ret = dev_priv->display.modeset_calc_cdclk(state);
                if (ret < 0)
                        return ret;

                /*
                 * Writes to dev_priv->cdclk.logical must protected by
                 * holding all the crtc locks, even if we don't end up
                 * touching the hardware
                 */
                if (intel_cdclk_changed(&dev_priv->cdclk.logical,
                                        &intel_state->cdclk.logical)) {
                        ret = intel_lock_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                /* All pipes must be switched off while we change the cdclk. */
                if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
                                              &intel_state->cdclk.actual)) {
                        ret = intel_modeset_all_pipes(state);
                        if (ret < 0)
                                return ret;
                }

                DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
                              intel_state->cdclk.logical.cdclk,
                              intel_state->cdclk.actual.cdclk);
                DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
                              intel_state->cdclk.logical.voltage_level,
                              intel_state->cdclk.actual.voltage_level);
        } else {
                to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
        }

        /* Drop pll references for all crtcs doing a modeset. */
        intel_modeset_clear_plls(state);

        if (IS_HASWELL(dev_priv))
                return haswell_mode_set_planes_workaround(state);

        return 0;
}
12884
12885 /*
12886  * Handle calculation of various watermark data at the end of the atomic check
12887  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12888  * handlers to ensure that all derived state has been updated.
12889  */
12890 static int calc_watermark_data(struct intel_atomic_state *state)
12891 {
12892         struct drm_device *dev = state->base.dev;
12893         struct drm_i915_private *dev_priv = to_i915(dev);
12894
12895         /* Is there platform-specific watermark information to calculate? */
12896         if (dev_priv->display.compute_global_watermarks)
12897                 return dev_priv->display.compute_global_watermarks(state);
12898
12899         return 0;
12900 }
12901
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915 atomic_check hook: flags crtcs whose private mode flags changed,
 * runs the core modeset checks, computes a full pipe config for every
 * crtc needing a modeset (downgrading to a fastset when the new config
 * is compatible with the old one), and finally derives cdclk, linked
 * plane, FBC and watermark state.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		/* Any private-flags delta forces a full modeset path. */
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* Disabling a crtc is always a full modeset; nothing to compute. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret == -EDEADLK)
			return ret;
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * If the computed state is compatible with the current one,
		 * downgrade the full modeset to a fastset (pipe update only).
		 */
		if (intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		/* Re-check: the compare above may have cleared mode_changed. */
		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset anywhere: carry the current logical cdclk over. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = icl_add_linked_planes(intel_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(intel_state);
}
12989
/* Prepare (pin) the framebuffers of all planes in @state ahead of commit. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);

	return ret;
}
12995
12996 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12997 {
12998         struct drm_device *dev = crtc->base.dev;
12999         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13000
13001         if (!vblank->max_vblank_count)
13002                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13003
13004         return dev->driver->get_vblank_counter(dev, crtc->pipe);
13005 }
13006
/*
 * Enable or update a single crtc: (re)enable the pipe on a full modeset,
 * otherwise apply the fastset pre-plane updates, then commit the plane
 * and FBC state for this crtc.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, NULL if it isn't in this update. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(pipe_config);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);

		if (pipe_config->update_pipe)
			intel_encoders_update_pipe(crtc, pipe_config, state);
	}

	/* Keep FBC in sync with the new pipe state. */
	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
		intel_fbc_disable(intel_crtc);
	else if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(crtc, old_crtc_state);

	/* Gen9+ uses the skl plane path, earlier gens the i9xx path. */
	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
	else
		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

	intel_finish_crtc_commit(crtc, old_crtc_state);
}
13049
13050 static void intel_update_crtcs(struct drm_atomic_state *state)
13051 {
13052         struct drm_crtc *crtc;
13053         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13054         int i;
13055
13056         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13057                 if (!new_crtc_state->active)
13058                         continue;
13059
13060                 intel_update_crtc(crtc, state, old_crtc_state,
13061                                   new_crtc_state);
13062         }
13063 }
13064
/*
 * Gen9+ crtc update: orders the per-crtc updates so that the DDB (display
 * buffer) allocations of active pipes never overlap while the transition
 * is in progress, enabling/disabling the second DBuf slice around the
 * updates as required on gen11+.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	/* Seed with the old (currently programmed) DDB allocations. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Already updated this pass, or not active: skip. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/*
			 * Defer this pipe while its new allocation would
			 * still overlap another pipe's current allocation.
			 */
			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
13144
13145 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13146 {
13147         struct intel_atomic_state *state, *next;
13148         struct llist_node *freed;
13149
13150         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13151         llist_for_each_entry_safe(state, next, freed, freed)
13152                 drm_atomic_state_put(&state->base);
13153 }
13154
13155 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13156 {
13157         struct drm_i915_private *dev_priv =
13158                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13159
13160         intel_atomic_helper_free_state(dev_priv);
13161 }
13162
/*
 * Sleep until the commit's i915_sw_fence signals, while also waking up
 * when I915_RESET_MODESET is flagged in gpu_error (presumably so a commit
 * waiting on the fence doesn't block a GPU reset that needs the modeset
 * lock -- NOTE(review): rationale inferred, confirm against the reset path).
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues so either event wakes us. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
13186
13187 static void intel_atomic_cleanup_work(struct work_struct *work)
13188 {
13189         struct drm_atomic_state *state =
13190                 container_of(work, struct drm_atomic_state, commit_work);
13191         struct drm_i915_private *i915 = to_i915(state->dev);
13192
13193         drm_atomic_helper_cleanup_planes(&i915->drm, state);
13194         drm_atomic_helper_commit_cleanup_done(state);
13195         drm_atomic_state_put(state);
13196
13197         intel_atomic_helper_free_state(i915);
13198 }
13199
/*
 * The hardware phase of an atomic commit: waits for dependencies, disables
 * pipes needing a full modeset, programs cdclk/SAGV for modesets, updates
 * the remaining crtcs, waits for flips, applies post-vblank optimizations
 * and finally defers cleanup to a worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	/* Hold the modeset power domain across the whole modeset sequence. */
	if (intel_state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Disable phase: tear down every pipe that needs a full modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
		intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			put_domains[intel_crtc->pipe] =
				modeset_get_crtc_power_domains(crtc,
					new_intel_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(intel_state, intel_crtc);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(old_intel_crtc_state);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* FIXME unify this for all platforms */
			if (!new_crtc_state->active &&
			    !HAS_GMCH(dev_priv) &&
			    dev_priv->display.initial_watermarks)
				dev_priv->display.initial_watermarks(intel_state,
								     new_intel_crtc_state);
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Program the cdclk while all pipes are (potentially) off. */
		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/* Reload LUTs for fastsets that changed color management state. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    !needs_modeset(new_crtc_state) &&
		    (new_intel_crtc_state->base.color_mgmt_changed ||
		     new_intel_crtc_state->update_pipe))
			intel_color_load_luts(new_intel_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      new_intel_crtc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV now that the pipe configuration is final. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->commit_work);
}
13379
13380 static void intel_atomic_commit_work(struct work_struct *work)
13381 {
13382         struct drm_atomic_state *state =
13383                 container_of(work, struct drm_atomic_state, commit_work);
13384
13385         intel_atomic_commit_tail(state);
13386 }
13387
13388 static int __i915_sw_fence_call
13389 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13390                           enum i915_sw_fence_notify notify)
13391 {
13392         struct intel_atomic_state *state =
13393                 container_of(fence, struct intel_atomic_state, commit_ready);
13394
13395         switch (notify) {
13396         case FENCE_COMPLETE:
13397                 /* we do blocking waits in the worker, nothing to do here */
13398                 break;
13399         case FENCE_FREE:
13400                 {
13401                         struct intel_atomic_helper *helper =
13402                                 &to_i915(state->base.dev)->atomic_helper;
13403
13404                         if (llist_add(&state->freed, &helper->free_list))
13405                                 schedule_work(&helper->free_work);
13406                         break;
13407                 }
13408         }
13409
13410         return NOTIFY_DONE;
13411 }
13412
13413 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13414 {
13415         struct drm_plane_state *old_plane_state, *new_plane_state;
13416         struct drm_plane *plane;
13417         int i;
13418
13419         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13420                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13421                                   intel_fb_obj(new_plane_state->fb),
13422                                   to_intel_plane(plane)->frontbuffer_bit);
13423 }
13424
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(state, true);

	if (ret) {
		i915_sw_fence_commit(&intel_state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}
	/* State has been swapped in; publish the new device-wide state. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
		       sizeof(intel_state->min_cdclk));
		memcpy(dev_priv->min_voltage_level,
		       intel_state->min_voltage_level,
		       sizeof(intel_state->min_voltage_level));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (nonblock && intel_state->modeset) {
		/* Nonblocking modesets go on the dedicated modeset workqueue. */
		queue_work(dev_priv->modeset_wq, &state->commit_work);
	} else if (nonblock) {
		queue_work(system_unbound_wq, &state->commit_work);
	} else {
		/* Blocking commits run inline, after pending modesets finish. */
		if (intel_state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
13525
/* CRTC vfunc table; the atomic entry points all use the DRM helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
13537
/*
 * Vblank waitqueue entry used to boost the GPU frequency if the request a
 * flip depends on hasn't started running by the time the vblank arrives.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the crtc's vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc we hold a vblank reference on */
	struct i915_request *request;	/* request gating the flip (referenced) */
};
13544
13545 static int do_rps_boost(struct wait_queue_entry *_wait,
13546                         unsigned mode, int sync, void *key)
13547 {
13548         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13549         struct i915_request *rq = wait->request;
13550
13551         /*
13552          * If we missed the vblank, but the request is already running it
13553          * is reasonable to assume that it will complete before the next
13554          * vblank without our intervention, so leave RPS alone.
13555          */
13556         if (!i915_request_started(rq))
13557                 gen6_rps_boost(rq, NULL);
13558         i915_request_put(rq);
13559
13560         drm_crtc_vblank_put(wait->crtc);
13561
13562         list_del(&wait->wait.entry);
13563         kfree(wait);
13564         return 1;
13565 }
13566
/*
 * Arm a one-shot waitqueue entry on @crtc's vblank queue that will boost
 * the GPU clocks if @fence (wrapping an i915 request) has not started by
 * the next vblank. All failures are silently ignored: this is purely an
 * opportunistic power/performance hint, never required for correctness.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	/* Only i915 fences wrap a request we could boost. */
	if (!dma_fence_is_i915(fence))
		return;

	/* RPS boosting (gen6_rps_boost) only exists on gen6+. */
	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	/* Hold a vblank reference; dropped in do_rps_boost(). */
	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* Pin the request alongside; do_rps_boost() puts it. */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
13595
/*
 * Pin the framebuffer backing @plane_state for scanout and store the
 * resulting vma in plane_state->vma (released by intel_plane_unpin_fb()).
 *
 * Cursor planes on platforms flagged cursor_needs_physical are first
 * attached to a physically contiguous allocation at the platform's
 * required cursor alignment.
 *
 * Both callers in this file hold struct_mutex around this call.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
13625
13626 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13627 {
13628         struct i915_vma *vma;
13629
13630         vma = fetch_and_zero(&old_plane_state->vma);
13631         if (vma)
13632                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13633 }
13634
/*
 * Raise the GEM scheduling priority of outstanding rendering into @obj
 * to I915_PRIORITY_DISPLAY, so work producing the next scanout buffer
 * is scheduled ahead of ordinary client work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13643
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Note: struct_mutex is taken internally around the pinning step; the
 * caller must not already hold it.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(new_state->state,
						      plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		/* Gate the commit on the user-supplied fence (with timeout). */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* No fb (plane being disabled): nothing to pin or fence. */
	if (!obj)
		return 0;

	/* Keep the backing pages resident across the locked section. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	mutex_unlock(&dev_priv->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		/* Boost the clocks if the implicit fence misses the vblank. */
		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
13762
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane:
 * undoes the interactive RPS bias and unpins the old vma.
 *
 * Note: struct_mutex is taken internally around the unpin; the caller
 * must not already hold it.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Undo the interactive bias set in intel_prepare_plane_fb(). */
	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
13790
/*
 * skl_max_scale - maximum downscale ratio for a skl+ plane scaler
 * @crtc_state: crtc state the plane is scaled on
 * @pixel_format: fourcc of the framebuffer being scaled
 *
 * Returns a .16 fixed-point ratio: the lesser of the hardware limit
 * (just under 3x, or just under 2x for planar YUV formats) and the
 * available cdclk/crtc_clock headroom, or DRM_PLANE_HELPER_NO_SCALING
 * when the crtc is disabled or the clocks are inconsistent.
 */
int
skl_max_scale(const struct intel_crtc_state *crtc_state,
	      u32 pixel_format)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int max_scale, mult;
	int crtc_clock, max_dotclk, tmpclk1, tmpclk2;

	/* No scaling possible on a disabled crtc. */
	if (!crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;

	/* Geminilake and gen10+ can drive a dotclock up to 2x cdclk. */
	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
		max_dotclk *= 2;

	/* A zero crtc_clock or dotclock deficit would make the math bogus. */
	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
	tmpclk1 = (1 << 16) * mult - 1;
	tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
	max_scale = min(tmpclk1, tmpclk2);

	return max_scale;
}
13825
/*
 * Called just before the plane writes of an atomic commit: starts vblank
 * evasion and applies pipe-level state (color management, pipe config,
 * scaler detach) that must land in the same vblank as the plane updates.
 * Watermarks are updated in every case, modeset or not.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* Full modesets program the pipe elsewhere; skip to watermarks. */
	if (modeset)
		goto out;

	if (intel_cstate->base.color_mgmt_changed ||
	    intel_cstate->update_pipe)
		intel_color_commit(intel_cstate);

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_cstate);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13860
/*
 * Enable FIFO underrun reporting for @crtc, including the PCH transcoder
 * when a PCH encoder is in use. The CPU side is skipped on gen2
 * (presumably no usable per-pipe underrun reporting there — TODO confirm).
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
13876
/*
 * Counterpart of intel_begin_crtc_commit(): ends vblank evasion and,
 * after a non-modeset fastset of state carrying I915_MODE_FLAG_INHERITED
 * (state taken over from boot firmware — NOTE(review): confirm), re-arms
 * FIFO underrun reporting.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
}
13893
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	/* struct intel_plane embeds drm_plane; free via the container. */
	kfree(to_intel_plane(plane));
}
13906
13907 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13908                                             u32 format, u64 modifier)
13909 {
13910         switch (modifier) {
13911         case DRM_FORMAT_MOD_LINEAR:
13912         case I915_FORMAT_MOD_X_TILED:
13913                 break;
13914         default:
13915                 return false;
13916         }
13917
13918         switch (format) {
13919         case DRM_FORMAT_C8:
13920         case DRM_FORMAT_RGB565:
13921         case DRM_FORMAT_XRGB1555:
13922         case DRM_FORMAT_XRGB8888:
13923                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13924                         modifier == I915_FORMAT_MOD_X_TILED;
13925         default:
13926                 return false;
13927         }
13928 }
13929
13930 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13931                                             u32 format, u64 modifier)
13932 {
13933         switch (modifier) {
13934         case DRM_FORMAT_MOD_LINEAR:
13935         case I915_FORMAT_MOD_X_TILED:
13936                 break;
13937         default:
13938                 return false;
13939         }
13940
13941         switch (format) {
13942         case DRM_FORMAT_C8:
13943         case DRM_FORMAT_RGB565:
13944         case DRM_FORMAT_XRGB8888:
13945         case DRM_FORMAT_XBGR8888:
13946         case DRM_FORMAT_XRGB2101010:
13947         case DRM_FORMAT_XBGR2101010:
13948                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13949                         modifier == I915_FORMAT_MOD_X_TILED;
13950         default:
13951                 return false;
13952         }
13953 }
13954
13955 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13956                                               u32 format, u64 modifier)
13957 {
13958         return modifier == DRM_FORMAT_MOD_LINEAR &&
13959                 format == DRM_FORMAT_ARGB8888;
13960 }
13961
/* drm_plane_funcs for gen4+ (i965-class) primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
13972
/* drm_plane_funcs for gen2/3 (i8xx-class) primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
13983
/*
 * Fastpath for the legacy cursor ioctls: update the cursor plane without
 * building a full atomic commit, so cursor motion stays cheap. Anything
 * that could affect watermarks or race a pending commit is punted to the
 * slowpath (drm_atomic_helper_update_plane()).
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	/* Scratch crtc state, used only to run the checks below. */
	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	/* Run the normal per-plane atomic checks against the duplicates. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move frontbuffer tracking from the old fb's object to the new. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane, crtc_state,
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, crtc_state);
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* The scratch crtc state is never installed, always destroyed. */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	/* On failure free the unused new state; on success the detached old. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
14117
/*
 * drm_plane_funcs for cursor planes: .update_plane uses the legacy
 * fastpath above; everything else goes through the atomic helpers.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
14128
14129 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14130                                enum i9xx_plane_id i9xx_plane)
14131 {
14132         if (!HAS_FBC(dev_priv))
14133                 return false;
14134
14135         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14136                 return i9xx_plane == PLANE_A; /* tied to pipe A */
14137         else if (IS_IVYBRIDGE(dev_priv))
14138                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14139                         i9xx_plane == PLANE_C;
14140         else if (INTEL_GEN(dev_priv) >= 4)
14141                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14142         else
14143                 return i9xx_plane == PLANE_A;
14144 }
14145
14146 static struct intel_plane *
14147 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14148 {
14149         struct intel_plane *plane;
14150         const struct drm_plane_funcs *plane_funcs;
14151         unsigned int supported_rotations;
14152         unsigned int possible_crtcs;
14153         const u64 *modifiers;
14154         const u32 *formats;
14155         int num_formats;
14156         int ret;
14157
14158         if (INTEL_GEN(dev_priv) >= 9)
14159                 return skl_universal_plane_create(dev_priv, pipe,
14160                                                   PLANE_PRIMARY);
14161
14162         plane = intel_plane_alloc();
14163         if (IS_ERR(plane))
14164                 return plane;
14165
14166         plane->pipe = pipe;
14167         /*
14168          * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14169          * port is hooked to pipe B. Hence we want plane A feeding pipe B.
14170          */
14171         if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14172                 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14173         else
14174                 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14175         plane->id = PLANE_PRIMARY;
14176         plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14177
14178         plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14179         if (plane->has_fbc) {
14180                 struct intel_fbc *fbc = &dev_priv->fbc;
14181
14182                 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14183         }
14184
14185         if (INTEL_GEN(dev_priv) >= 4) {
14186                 formats = i965_primary_formats;
14187                 num_formats = ARRAY_SIZE(i965_primary_formats);
14188                 modifiers = i9xx_format_modifiers;
14189
14190                 plane->max_stride = i9xx_plane_max_stride;
14191                 plane->update_plane = i9xx_update_plane;
14192                 plane->disable_plane = i9xx_disable_plane;
14193                 plane->get_hw_state = i9xx_plane_get_hw_state;
14194                 plane->check_plane = i9xx_plane_check;
14195
14196                 plane_funcs = &i965_plane_funcs;
14197         } else {
14198                 formats = i8xx_primary_formats;
14199                 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14200                 modifiers = i9xx_format_modifiers;
14201
14202                 plane->max_stride = i9xx_plane_max_stride;
14203                 plane->update_plane = i9xx_update_plane;
14204                 plane->disable_plane = i9xx_disable_plane;
14205                 plane->get_hw_state = i9xx_plane_get_hw_state;
14206                 plane->check_plane = i9xx_plane_check;
14207
14208                 plane_funcs = &i8xx_plane_funcs;
14209         }
14210
14211         possible_crtcs = BIT(pipe);
14212
14213         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14214                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14215                                                possible_crtcs, plane_funcs,
14216                                                formats, num_formats, modifiers,
14217                                                DRM_PLANE_TYPE_PRIMARY,
14218                                                "primary %c", pipe_name(pipe));
14219         else
14220                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14221                                                possible_crtcs, plane_funcs,
14222                                                formats, num_formats, modifiers,
14223                                                DRM_PLANE_TYPE_PRIMARY,
14224                                                "plane %c",
14225                                                plane_name(plane->i9xx_plane));
14226         if (ret)
14227                 goto fail;
14228
14229         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14230                 supported_rotations =
14231                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14232                         DRM_MODE_REFLECT_X;
14233         } else if (INTEL_GEN(dev_priv) >= 4) {
14234                 supported_rotations =
14235                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14236         } else {
14237                 supported_rotations = DRM_MODE_ROTATE_0;
14238         }
14239
14240         if (INTEL_GEN(dev_priv) >= 4)
14241                 drm_plane_create_rotation_property(&plane->base,
14242                                                    DRM_MODE_ROTATE_0,
14243                                                    supported_rotations);
14244
14245         drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14246
14247         return plane;
14248
14249 fail:
14250         intel_plane_free(plane);
14251
14252         return ERR_PTR(ret);
14253 }
14254
/*
 * Create the cursor plane for @pipe. i845/i865 have their own cursor
 * implementation; everything else uses the i9xx variant. Returns the
 * plane, or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 sentinels in the cached register values — presumably so the
	 * first update never matches the cache and always writes the
	 * registers out. TODO confirm against the cursor update paths.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
14319
14320 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14321                                     struct intel_crtc_state *crtc_state)
14322 {
14323         struct intel_crtc_scaler_state *scaler_state =
14324                 &crtc_state->scaler_state;
14325         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14326         int i;
14327
14328         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14329         if (!crtc->num_scalers)
14330                 return;
14331
14332         for (i = 0; i < crtc->num_scalers; i++) {
14333                 struct intel_scaler *scaler = &scaler_state->scalers[i];
14334
14335                 scaler->in_use = 0;
14336                 scaler->mode = 0;
14337         }
14338
14339         scaler_state->scaler_id = -1;
14340 }
14341
/*
 * intel_crtc_init - allocate and register the CRTC for one pipe
 * @dev_priv: i915 device
 * @pipe: hardware pipe to create the CRTC for
 *
 * Allocates the intel_crtc and its initial state, creates the primary,
 * sprite and cursor planes for the pipe, and registers everything with
 * the DRM core.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        /* Link the CRTC and its state objects both ways. */
        intel_crtc->config = crtc_state;
        intel_crtc->base.state = &crtc_state->base;
        crtc_state->base.crtc = &intel_crtc->base;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        /* Create one plane per sprite this pipe supports. */
        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        &intel_crtc_funcs,
                                        "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe may be claimed by at most one CRTC. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                /* Pre-gen9 also records which CRTC owns each legacy plane. */
                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_color_init(intel_crtc);

        /* The DRM CRTC index is expected to match the hw pipe. */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
14430
14431 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14432                                       struct drm_file *file)
14433 {
14434         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14435         struct drm_crtc *drmmode_crtc;
14436         struct intel_crtc *crtc;
14437
14438         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14439         if (!drmmode_crtc)
14440                 return -ENOENT;
14441
14442         crtc = to_intel_crtc(drmmode_crtc);
14443         pipe_from_crtc_id->pipe = crtc->pipe;
14444
14445         return 0;
14446 }
14447
14448 static int intel_encoder_clones(struct intel_encoder *encoder)
14449 {
14450         struct drm_device *dev = encoder->base.dev;
14451         struct intel_encoder *source_encoder;
14452         int index_mask = 0;
14453         int entry = 0;
14454
14455         for_each_intel_encoder(dev, source_encoder) {
14456                 if (encoders_cloneable(encoder, source_encoder))
14457                         index_mask |= (1 << entry);
14458
14459                 entry++;
14460         }
14461
14462         return index_mask;
14463 }
14464
14465 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14466 {
14467         if (!IS_MOBILE(dev_priv))
14468                 return false;
14469
14470         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14471                 return false;
14472
14473         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14474                 return false;
14475
14476         return true;
14477 }
14478
14479 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14480 {
14481         if (INTEL_GEN(dev_priv) >= 9)
14482                 return false;
14483
14484         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14485                 return false;
14486
14487         if (HAS_PCH_LPT_H(dev_priv) &&
14488             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14489                 return false;
14490
14491         /* DDI E can't be used if DDI A requires 4 lanes */
14492         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14493                 return false;
14494
14495         if (!dev_priv->vbt.int_crt_support)
14496                 return false;
14497
14498         return true;
14499 }
14500
14501 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14502 {
14503         int pps_num;
14504         int pps_idx;
14505
14506         if (HAS_DDI(dev_priv))
14507                 return;
14508         /*
14509          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14510          * everywhere where registers can be write protected.
14511          */
14512         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14513                 pps_num = 2;
14514         else
14515                 pps_num = 1;
14516
14517         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14518                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14519
14520                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14521                 I915_WRITE(PP_CONTROL(pps_idx), val);
14522         }
14523 }
14524
14525 static void intel_pps_init(struct drm_i915_private *dev_priv)
14526 {
14527         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14528                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14529         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14530                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14531         else
14532                 dev_priv->pps_mmio_base = PPS_BASE;
14533
14534         intel_pps_unlock_regs_wa(dev_priv);
14535 }
14536
/*
 * intel_setup_outputs - probe and register all display outputs
 * @dev_priv: i915 device
 *
 * Walks the platform-specific set of possible output ports, consulting
 * detection straps and/or VBT data where available, and registers an
 * encoder for each output found. Finally fills in the possible_crtcs
 * and possible_clones masks of every registered encoder.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv))
                return;

        if (IS_ICELAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /* Fix up the CRTC/clone masks now that all encoders exist. */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14758
14759 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14760 {
14761         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14762         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14763
14764         drm_framebuffer_cleanup(fb);
14765
14766         i915_gem_object_lock(obj);
14767         WARN_ON(!obj->framebuffer_references--);
14768         i915_gem_object_unlock(obj);
14769
14770         i915_gem_object_put(obj);
14771
14772         kfree(intel_fb);
14773 }
14774
14775 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14776                                                 struct drm_file *file,
14777                                                 unsigned int *handle)
14778 {
14779         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14780
14781         if (obj->userptr.mm) {
14782                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14783                 return -EINVAL;
14784         }
14785
14786         return drm_gem_handle_create(file, &obj->base, handle);
14787 }
14788
14789 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14790                                         struct drm_file *file,
14791                                         unsigned flags, unsigned color,
14792                                         struct drm_clip_rect *clips,
14793                                         unsigned num_clips)
14794 {
14795         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14796
14797         i915_gem_object_flush_if_display(obj);
14798         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14799
14800         return 0;
14801 }
14802
/* Framebuffer vfuncs used by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
        .dirty = intel_user_framebuffer_dirty,
};
14808
14809 static
14810 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14811                          u32 pixel_format, u64 fb_modifier)
14812 {
14813         struct intel_crtc *crtc;
14814         struct intel_plane *plane;
14815
14816         /*
14817          * We assume the primary plane for pipe A has
14818          * the highest stride limits of them all.
14819          */
14820         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14821         plane = to_intel_plane(crtc->base.primary);
14822
14823         return plane->max_stride(plane, pixel_format, fb_modifier,
14824                                  DRM_MODE_ROTATE_0);
14825 }
14826
/*
 * intel_framebuffer_init - validate an addfb2 request and init the fb
 * @intel_fb: framebuffer wrapper to fill in
 * @obj: backing GEM object (shared by all color planes)
 * @mode_cmd: addfb2 parameters from userspace
 *
 * Checks the tiling/modifier/format/pitch constraints against the
 * hardware limits, then registers the framebuffer with the DRM core.
 * Takes a framebuffer reference on @obj which is dropped again on any
 * failure. Returns 0 on success or a negative errno.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 pitch_limit;
        unsigned int tiling, stride;
        int ret = -EINVAL;
        int i;

        /* Take an fb reference and snapshot the object's tiling state. */
        i915_gem_object_lock(obj);
        obj->framebuffer_references++;
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* No explicit modifiers: derive one from the tiling mode. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
                              drm_get_format_name(mode_cmd->pixel_format,
                                                  &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
                                           mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > pitch_limit) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], pitch_limit);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                /* All color planes must share the one backing object. */
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);

                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;

                if (fb->pitches[i] & (stride_alignment - 1)) {
                        DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        /* Undo the framebuffer reference taken at the top. */
        i915_gem_object_lock(obj);
        obj->framebuffer_references--;
        i915_gem_object_unlock(obj);
        return ret;
}
14961
14962 static struct drm_framebuffer *
14963 intel_user_framebuffer_create(struct drm_device *dev,
14964                               struct drm_file *filp,
14965                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14966 {
14967         struct drm_framebuffer *fb;
14968         struct drm_i915_gem_object *obj;
14969         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14970
14971         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14972         if (!obj)
14973                 return ERR_PTR(-ENOENT);
14974
14975         fb = intel_framebuffer_create(obj, &mode_cmd);
14976         if (IS_ERR(fb))
14977                 i915_gem_object_put(obj);
14978
14979         return fb;
14980 }
14981
14982 static void intel_atomic_state_free(struct drm_atomic_state *state)
14983 {
14984         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14985
14986         drm_atomic_state_default_release(state);
14987
14988         i915_sw_fence_fini(&intel_state->commit_ready);
14989
14990         kfree(state);
14991 }
14992
14993 static enum drm_mode_status
14994 intel_mode_valid(struct drm_device *dev,
14995                  const struct drm_display_mode *mode)
14996 {
14997         struct drm_i915_private *dev_priv = to_i915(dev);
14998         int hdisplay_max, htotal_max;
14999         int vdisplay_max, vtotal_max;
15000
15001         /*
15002          * Can't reject DBLSCAN here because Xorg ddxen can add piles
15003          * of DBLSCAN modes to the output's mode list when they detect
15004          * the scaling mode property on the connector. And they don't
15005          * ask the kernel to validate those modes in any way until
15006          * modeset time at which point the client gets a protocol error.
15007          * So in order to not upset those clients we silently ignore the
15008          * DBLSCAN flag on such connectors. For other connectors we will
15009          * reject modes with the DBLSCAN flag in encoder->compute_config().
15010          * And we always reject DBLSCAN modes in connector->mode_valid()
15011          * as we never want such modes on the connector's mode list.
15012          */
15013
15014         if (mode->vscan > 1)
15015                 return MODE_NO_VSCAN;
15016
15017         if (mode->flags & DRM_MODE_FLAG_HSKEW)
15018                 return MODE_H_ILLEGAL;
15019
15020         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15021                            DRM_MODE_FLAG_NCSYNC |
15022                            DRM_MODE_FLAG_PCSYNC))
15023                 return MODE_HSYNC;
15024
15025         if (mode->flags & (DRM_MODE_FLAG_BCAST |
15026                            DRM_MODE_FLAG_PIXMUX |
15027                            DRM_MODE_FLAG_CLKDIV2))
15028                 return MODE_BAD;
15029
15030         if (INTEL_GEN(dev_priv) >= 9 ||
15031             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15032                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15033                 vdisplay_max = 4096;
15034                 htotal_max = 8192;
15035                 vtotal_max = 8192;
15036         } else if (INTEL_GEN(dev_priv) >= 3) {
15037                 hdisplay_max = 4096;
15038                 vdisplay_max = 4096;
15039                 htotal_max = 8192;
15040                 vtotal_max = 8192;
15041         } else {
15042                 hdisplay_max = 2048;
15043                 vdisplay_max = 2048;
15044                 htotal_max = 4096;
15045                 vtotal_max = 4096;
15046         }
15047
15048         if (mode->hdisplay > hdisplay_max ||
15049             mode->hsync_start > htotal_max ||
15050             mode->hsync_end > htotal_max ||
15051             mode->htotal > htotal_max)
15052                 return MODE_H_ILLEGAL;
15053
15054         if (mode->vdisplay > vdisplay_max ||
15055             mode->vsync_start > vtotal_max ||
15056             mode->vsync_end > vtotal_max ||
15057             mode->vtotal > vtotal_max)
15058                 return MODE_V_ILLEGAL;
15059
15060         return MODE_OK;
15061 }
15062
/* Device-wide mode config vfuncs: fb creation, mode validation and the
 * atomic check/commit/state entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
15074
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Populates dev_priv->display with the per-platform CRTC/pipe vfuncs.
 * The else-if chain is ordered newest-to-oldest platform; the first
 * matching check wins, so e.g. gen9+ must be tested before HAS_DDI and
 * HAS_DDI before HAS_PCH_SPLIT — do not reorder.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* SKL+: HSW-style pipe/clock handling, SKL universal planes */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: DDI-based, but pre-SKL primary planes */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB: PCH-split display */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* Gen3/4-era fallback */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* Gen2: only the DPLL computation differs */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training only exists on ILK..BDW. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
15167
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/*
	 * Read-modify-write VGA sequencer register SR01 through the legacy
	 * I/O ports; bit 5 presumably blanks the screen before we turn the
	 * VGA plane off — TODO confirm against the VGA sequencer spec.
	 */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Give the hardware time to settle before touching VGACNTRL. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15186
/*
 * Read out the current cdclk state from the hardware and seed the
 * software tracking with it.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	/* logical == actual == hw: no pending cdclk change at init. */
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
15195
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard modeset deadlock-avoidance dance: back off and retry. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Keep the committed software state in sync with what we programmed. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15282
15283 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15284 {
15285         if (IS_GEN(dev_priv, 5)) {
15286                 u32 fdi_pll_clk =
15287                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15288
15289                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15290         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15291                 dev_priv->fdi_pll_freq = 270000;
15292         } else {
15293                 return;
15294         }
15295
15296         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15297 }
15298
/*
 * Commit the state read out from the hardware once at probe time, so that
 * all plane/crtc software state is fully computed before the first real
 * modeset. Returns 0 on success or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* Re-attach the ctx on every retry; backoff clears the state. */
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Deadlock-avoidance: clear the duplicated state and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15354
/*
 * One-time display/modeset initialization at driver load: sets up the DRM
 * mode config, per-pipe CRTCs, shared DPLLs, cdclk, outputs, takes over
 * the BIOS framebuffer, sanitizes watermarks and performs an initial
 * atomic commit. Returns 0 on success or a negative errno.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/* Ordered wq: modeset commits must not run concurrently. */
	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev_priv);

	intel_fbc_init(dev_priv);

	intel_init_pm(dev_priv);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* maximum framebuffer dimensions */
	if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN(dev_priv, 3)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Per-platform cursor size limits. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	} else {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			/*
			 * NOTE(review): this error path cleans up the mode
			 * config but appears to leave modeset_wq allocated —
			 * verify whether the caller destroys it on failure.
			 */
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(dev_priv))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
15502
/*
 * Force-enable @pipe with a fixed 640x480@60 timing (i830 "force quirk").
 * The exact register write order below is required by the hardware; do
 * not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the fixed dividers produce the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from whatever the BIOS left in DPLL. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed VGA-style 640x480 timings. */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15572
/*
 * Disable a pipe that was force-enabled by the i830 quirk: all planes and
 * cursors must already be off, then the pipe is stopped before the DPLL
 * is dropped back to VGA-mode-disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* All planes/cursors are expected to be off before the pipe goes down. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait until the pipe has actually stopped before killing the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15594
15595 static void
15596 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15597 {
15598         struct intel_crtc *crtc;
15599
15600         if (INTEL_GEN(dev_priv) >= 4)
15601                 return;
15602
15603         for_each_intel_crtc(&dev_priv->drm, crtc) {
15604                 struct intel_plane *plane =
15605                         to_intel_plane(crtc->base.primary);
15606                 struct intel_crtc *plane_crtc;
15607                 enum pipe pipe;
15608
15609                 if (!plane->get_hw_state(plane, &pipe))
15610                         continue;
15611
15612                 if (pipe == crtc->pipe)
15613                         continue;
15614
15615                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15616                               plane->base.base.id, plane->base.name);
15617
15618                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15619                 intel_plane_disable_noatomic(plane_crtc, plane);
15620         }
15621 }
15622
15623 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15624 {
15625         struct drm_device *dev = crtc->base.dev;
15626         struct intel_encoder *encoder;
15627
15628         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15629                 return true;
15630
15631         return false;
15632 }
15633
15634 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15635 {
15636         struct drm_device *dev = encoder->base.dev;
15637         struct intel_connector *connector;
15638
15639         for_each_connector_on_encoder(dev, &encoder->base, connector)
15640                 return connector;
15641
15642         return NULL;
15643 }
15644
15645 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15646                               enum pipe pch_transcoder)
15647 {
15648         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15649                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15650 }
15651
/*
 * Bring a CRTC read out from the BIOS into a state our driver can manage:
 * clear debug frame-start delays, shut down non-primary planes, reset the
 * background color on gen9+, disable the pipe if no encoder drives it,
 * and initialize the FIFO-underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15724
15725 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15726 {
15727         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15728
15729         /*
15730          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15731          * the hardware when a high res displays plugged in. DPLL P
15732          * divider is zero, and the pipe timings are bonkers. We'll
15733          * try to disable everything in that case.
15734          *
15735          * FIXME would be nice to be able to sanitize this state
15736          * without several WARNs, but for now let's take the easy
15737          * road.
15738          */
15739         return IS_GEN(dev_priv, 6) &&
15740                 crtc_state->base.active &&
15741                 crtc_state->shared_dpll &&
15742                 crtc_state->port_clock == 0;
15743 }
15744
/*
 * Clamp an encoder read out from the BIOS to a consistent state: if a
 * connector is active on the encoder but no active (or sane) pipe backs
 * it, manually run the encoder's disable hooks and mark everything off.
 * Finally notify opregion and, on gen11+, sanitize the PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	/* Treat a pipe with a misprogrammed (SNB BIOS) DPLL as inactive. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* Restore whatever best_encoder the state had before. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
15811
15812 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15813 {
15814         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15815
15816         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15817                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15818                 i915_disable_vga(dev_priv);
15819         }
15820 }
15821
15822 void i915_redisable_vga(struct drm_i915_private *dev_priv)
15823 {
15824         intel_wakeref_t wakeref;
15825
15826         /*
15827          * This function can be called both from intel_modeset_setup_hw_state or
15828          * at a very early point in our resume sequence, where the power well
15829          * structures are not yet restored. Since this function is at a very
15830          * paranoid "someone might have enabled VGA while we were not looking"
15831          * level, just check if the power well is enabled instead of trying to
15832          * follow the "don't touch the power well if we don't need it" policy
15833          * the rest of the driver uses.
15834          */
15835         wakeref = intel_display_power_get_if_enabled(dev_priv,
15836                                                      POWER_DOMAIN_VGA);
15837         if (!wakeref)
15838                 return;
15839
15840         i915_redisable_vga_power_on(dev_priv);
15841
15842         intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
15843 }
15844
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	/*
	 * First pass: ask each plane's hw-state hook which pipe it is
	 * attached to and whether it is enabled, then record the visibility
	 * in the crtc state of that pipe.
	 */
	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* get_hw_state() only overwrites pipe when the plane is enabled */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/*
	 * Second pass: fix up the per-crtc active plane bookkeeping now that
	 * all plane->pipe assignments are known.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
15877
/*
 * Read out the current hardware modeset state into the atomic state objects:
 * crtcs first, then planes, shared DPLLs, encoders and connectors, and
 * finally the derived per-crtc state (mode, pixel rate, cdclk requirements).
 * Called with all modeset locks held during driver load and resume.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a clean slate before reading out the pipe config. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	/*
	 * Read out each shared DPLL's hw state and reconstruct which crtcs
	 * reference it from the crtc states read out above.
	 */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			/* Encoder is enabled: link it to its pipe and read config. */
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining per-crtc software state from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
16030
16031 static void
16032 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16033 {
16034         struct intel_encoder *encoder;
16035
16036         for_each_intel_encoder(&dev_priv->drm, encoder) {
16037                 u64 get_domains;
16038                 enum intel_display_power_domain domain;
16039                 struct intel_crtc_state *crtc_state;
16040
16041                 if (!encoder->get_power_domains)
16042                         continue;
16043
16044                 /*
16045                  * MST-primary and inactive encoders don't have a crtc state
16046                  * and neither of these require any power domain references.
16047                  */
16048                 if (!encoder->base.crtc)
16049                         continue;
16050
16051                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16052                 get_domains = encoder->get_power_domains(encoder, crtc_state);
16053                 for_each_power_domain(domain, get_domains)
16054                         intel_display_power_get(dev_priv, domain);
16055         }
16056 }
16057
/*
 * Apply display workarounds that must be in place before any other
 * display hardware is touched during init/resume.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
16074
16075 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16076                                        enum port port, i915_reg_t hdmi_reg)
16077 {
16078         u32 val = I915_READ(hdmi_reg);
16079
16080         if (val & SDVO_ENABLE ||
16081             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16082                 return;
16083
16084         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16085                       port_name(port));
16086
16087         val &= ~SDVO_PIPE_SEL_MASK;
16088         val |= SDVO_PIPE_SEL(PIPE_A);
16089
16090         I915_WRITE(hdmi_reg, val);
16091 }
16092
16093 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16094                                      enum port port, i915_reg_t dp_reg)
16095 {
16096         u32 val = I915_READ(dp_reg);
16097
16098         if (val & DP_PORT_EN ||
16099             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16100                 return;
16101
16102         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16103                       port_name(port));
16104
16105         val &= ~DP_PIPE_SEL_MASK;
16106         val |= DP_PIPE_SEL(PIPE_A);
16107
16108         I915_WRITE(dp_reg, val);
16109 }
16110
/* Sanitize the transcoder select bits on all IBX PCH DP and HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
16133
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: read out crtc/plane/dpll/encoder/connector state, then fix up
 * anything inconsistent (BIOS leftovers, resume fallout). Called with
 * all modeset locks held via @ctx during driver load and resume.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Keep everything powered while we poke at the hardware. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc_state,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but unused after sanitization. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where applicable sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * Acquire the crtc power domain references; after sanitization the
	 * put_domains mask must be empty, hence the WARN_ON.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
16224
/*
 * Restore the display state saved at suspend time (if any), retrying the
 * modeset lock acquisition on deadlock, and re-enable IPC afterwards.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Consume the saved state so a second resume doesn't replay it. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard ww-mutex dance: back off and retry on -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
16258
16259 static void intel_hpd_poll_fini(struct drm_device *dev)
16260 {
16261         struct intel_connector *connector;
16262         struct drm_connector_list_iter conn_iter;
16263
16264         /* Kill all the work that may have been queued by hpd. */
16265         drm_connector_list_iter_begin(dev, &conn_iter);
16266         for_each_intel_connector_iter(connector, &conn_iter) {
16267                 if (connector->modeset_retry_work.func)
16268                         cancel_work_sync(&connector->modeset_retry_work);
16269                 if (connector->hdcp.shim) {
16270                         cancel_delayed_work_sync(&connector->hdcp.check_work);
16271                         cancel_work_sync(&connector->hdcp.prop_work);
16272                 }
16273         }
16274         drm_connector_list_iter_end(&conn_iter);
16275 }
16276
/*
 * Tear down the modeset side of the driver. The ordering below is
 * deliberate - see the inline comments; do not reorder casually.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Drain pending modeset work before pulling the rug out. */
	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}
16319
16320 /*
16321  * set vga decode state - true == enable VGA decode
16322  */
16323 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16324 {
16325         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16326         u16 gmch_ctrl;
16327
16328         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16329                 DRM_ERROR("failed to read control word\n");
16330                 return -EIO;
16331         }
16332
16333         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16334                 return 0;
16335
16336         if (state)
16337                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16338         else
16339                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16340
16341         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16342                 DRM_ERROR("failed to write control word\n");
16343                 return -EIO;
16344         }
16345
16346         return 0;
16347 }
16348
16349 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16350
/*
 * Snapshot of display register state captured at GPU error time for
 * inclusion in the error state dump. Filled by
 * intel_display_capture_error_state(), printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (hsw/bdw only) */

	int num_transcoders;		/* valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;		/* NOTE(review): never populated by the capture path? verify */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was enabled at capture */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE (gen <= 3) */
		u32 pos;		/* DSPPOS (gen <= 3) */
		u32 addr;		/* DSPADDR (gen <= 7, not hsw) */
		u32 surface;		/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* transcoder power domain was enabled */
		enum transcoder cpu_transcoder;

		u32 conf;		/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16394
16395 struct intel_display_error_state *
16396 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16397 {
16398         struct intel_display_error_state *error;
16399         int transcoders[] = {
16400                 TRANSCODER_A,
16401                 TRANSCODER_B,
16402                 TRANSCODER_C,
16403                 TRANSCODER_EDP,
16404         };
16405         int i;
16406
16407         if (!HAS_DISPLAY(dev_priv))
16408                 return NULL;
16409
16410         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16411         if (error == NULL)
16412                 return NULL;
16413
16414         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16415                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16416
16417         for_each_pipe(dev_priv, i) {
16418                 error->pipe[i].power_domain_on =
16419                         __intel_display_power_is_enabled(dev_priv,
16420                                                          POWER_DOMAIN_PIPE(i));
16421                 if (!error->pipe[i].power_domain_on)
16422                         continue;
16423
16424                 error->cursor[i].control = I915_READ(CURCNTR(i));
16425                 error->cursor[i].position = I915_READ(CURPOS(i));
16426                 error->cursor[i].base = I915_READ(CURBASE(i));
16427
16428                 error->plane[i].control = I915_READ(DSPCNTR(i));
16429                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16430                 if (INTEL_GEN(dev_priv) <= 3) {
16431                         error->plane[i].size = I915_READ(DSPSIZE(i));
16432                         error->plane[i].pos = I915_READ(DSPPOS(i));
16433                 }
16434                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16435                         error->plane[i].addr = I915_READ(DSPADDR(i));
16436                 if (INTEL_GEN(dev_priv) >= 4) {
16437                         error->plane[i].surface = I915_READ(DSPSURF(i));
16438                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16439                 }
16440
16441                 error->pipe[i].source = I915_READ(PIPESRC(i));
16442
16443                 if (HAS_GMCH(dev_priv))
16444                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16445         }
16446
16447         /* Note: this does not include DSI transcoders. */
16448         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16449         if (HAS_DDI(dev_priv))
16450                 error->num_transcoders++; /* Account for eDP. */
16451
16452         for (i = 0; i < error->num_transcoders; i++) {
16453                 enum transcoder cpu_transcoder = transcoders[i];
16454
16455                 error->transcoder[i].power_domain_on =
16456                         __intel_display_power_is_enabled(dev_priv,
16457                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16458                 if (!error->transcoder[i].power_domain_on)
16459                         continue;
16460
16461                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16462
16463                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16464                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16465                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16466                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16467                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16468                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16469                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16470         }
16471
16472         return error;
16473 }
16474
/* Shorthand for appending formatted text to the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Print a previously captured display error state (see
 * intel_display_capture_error_state()) into the error state buffer.
 * A NULL @error (no display, or allocation failure at capture time)
 * is silently ignored.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* The gen-conditional fields mirror the capture side. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
16532
16533 #endif