1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 /**
25  * DOC: Frame Buffer Compression (FBC)
26  *
27  * FBC tries to save memory bandwidth (and so power consumption) by
28  * compressing the amount of memory used by the display. It is totally
29  * transparent to user space and completely handled in the kernel.
30  *
31  * The benefits of FBC are mostly visible with solid backgrounds and
32  * variation-less patterns. They come from keeping the memory footprint small
33  * and having fewer memory pages opened and accessed for refreshing the display.
34  *
35  * i915 is responsible for reserving stolen memory for FBC and configuring its
36  * offset in the proper registers. The hardware takes care of all the
37  * compression/decompression. However, there are many known cases where we have to
38  * forcibly disable it to allow proper screen updates.
39  */
40
41 #include <drm/drm_fourcc.h>
42
43 #include "i915_drv.h"
44 #include "intel_display_types.h"
45 #include "intel_fbc.h"
46 #include "intel_frontbuffer.h"
47
48 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
49 {
50         return HAS_FBC(dev_priv);
51 }
52
53 static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
54 {
55         return INTEL_GEN(dev_priv) <= 3;
56 }
57
58 /*
59  * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
60  * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
61  * origin so the x and y offsets can actually fit the registers. As a
62  * consequence, the fence doesn't really start exactly at the display plane
63  * address we program because it starts at the real start of the buffer, so we
64  * have to take this into consideration here.
65  */
66 static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
67 {
68         return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
69 }
70
71 /*
72  * For SKL+, the plane source size used by the hardware is based on the value we
73  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
74  * we wrote to PIPESRC.
75  */
76 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
77                                             int *width, int *height)
78 {
79         if (width)
80                 *width = cache->plane.src_w;
81         if (height)
82                 *height = cache->plane.src_h;
83 }
84
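/*
 * Size of the compressed framebuffer (CFB) we need to reserve in stolen
 * memory: the number of lines is clamped to what the hardware supports
 * (2048 on gen7, 2560 on gen8+), and the full framebuffer stride is used
 * because the hardware compresses whole lines, not just the active area.
 */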
85 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
86                                         struct intel_fbc_state_cache *cache)
87 {
88         int lines;
89
90         intel_fbc_get_plane_source_size(cache, NULL, &lines);
91         if (IS_GEN(dev_priv, 7))
92                 lines = min(lines, 2048);
93         else if (INTEL_GEN(dev_priv) >= 8)
94                 lines = min(lines, 2560);
95
96         /* Hardware needs the full buffer stride, not just the active area. */
97         return lines * cache->fb.stride;
98 }
99
100 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
101 {
102         u32 fbc_ctl;
103
104         /* Disable compression */
105         fbc_ctl = I915_READ(FBC_CONTROL);
106         if ((fbc_ctl & FBC_CTL_EN) == 0)
107                 return;
108
109         fbc_ctl &= ~FBC_CTL_EN;
110         I915_WRITE(FBC_CONTROL, fbc_ctl);
111
112         /* Wait for compressing bit to clear */
113         if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
114                                     FBC_STAT_COMPRESSING, 10)) {
115                 DRM_DEBUG_KMS("FBC idle timed out\n");
116                 return;
117         }
118 }
119
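/*
 * Pre-G4X FBC activation: program the compressed line pitch in 32B (gen2)
 * or 64B units, clear the tag RAM, and enable periodic compression. The
 * fence id goes straight into FBC_CONTROL (plus FBC_CONTROL2/FBC_FENCE_OFF
 * on gen4) so the hardware can track frontbuffer writes through the fence.
 */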
120 static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
121 {
122         struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
123         int cfb_pitch;
124         int i;
125         u32 fbc_ctl;
126
127         /* Note: fbc.threshold == 1 for i8xx */
128         cfb_pitch = params->cfb_size / FBC_LL_SIZE;
129         if (params->fb.stride < cfb_pitch)
130                 cfb_pitch = params->fb.stride;
131
132         /* FBC_CTL wants 32B or 64B units */
133         if (IS_GEN(dev_priv, 2))
134                 cfb_pitch = (cfb_pitch / 32) - 1;
135         else
136                 cfb_pitch = (cfb_pitch / 64) - 1;
137
138         /* Clear old tags */
139         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
140                 I915_WRITE(FBC_TAG(i), 0);
141
142         if (IS_GEN(dev_priv, 4)) {
143                 u32 fbc_ctl2;
144
145                 /* Set it up... */
146                 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
147                 fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
148                 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
149                 I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
150         }
151
152         /* enable it... */
153         fbc_ctl = I915_READ(FBC_CONTROL);
154         fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
155         fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
156         if (IS_I945GM(dev_priv))
157                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
158         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
159         fbc_ctl |= params->vma->fence->id;
160         I915_WRITE(FBC_CONTROL, fbc_ctl);
161 }
162
163 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
164 {
165         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
166 }
167
168 static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
169 {
170         struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
171         u32 dpfc_ctl;
172
173         dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
174         if (params->fb.format->cpp[0] == 2)
175                 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
176         else
177                 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
178
179         if (params->flags & PLANE_HAS_FENCE) {
180                 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
181                 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
182         } else {
183                 I915_WRITE(DPFC_FENCE_YOFF, 0);
184         }
185
186         /* enable it... */
187         I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
188 }
189
190 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
191 {
192         u32 dpfc_ctl;
193
194         /* Disable compression */
195         dpfc_ctl = I915_READ(DPFC_CONTROL);
196         if (dpfc_ctl & DPFC_CTL_EN) {
197                 dpfc_ctl &= ~DPFC_CTL_EN;
198                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
199         }
200 }
201
202 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
203 {
204         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
205 }
206
207 /* This function forces a CFB recompression through the nuke operation. */
208 static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
209 {
210         I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
211         POSTING_READ(MSG_FBC_REND_STATE);
212 }
213
214 static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
215 {
216         struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
217         u32 dpfc_ctl;
218         int threshold = dev_priv->fbc.threshold;
219
220         dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
221         if (params->fb.format->cpp[0] == 2)
222                 threshold++;
223
224         switch (threshold) {
225         case 4:
226         case 3:
227                 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
228                 break;
229         case 2:
230                 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
231                 break;
232         case 1:
233                 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
234                 break;
235         }
236
237         if (params->flags & PLANE_HAS_FENCE) {
238                 dpfc_ctl |= DPFC_CTL_FENCE_EN;
239                 if (IS_GEN(dev_priv, 5))
240                         dpfc_ctl |= params->vma->fence->id;
241                 if (IS_GEN(dev_priv, 6)) {
242                         I915_WRITE(SNB_DPFC_CTL_SA,
243                                    SNB_CPU_FENCE_ENABLE |
244                                    params->vma->fence->id);
245                         I915_WRITE(DPFC_CPU_FENCE_OFFSET,
246                                    params->crtc.fence_y_offset);
247                 }
248         } else {
249                 if (IS_GEN(dev_priv, 6)) {
250                         I915_WRITE(SNB_DPFC_CTL_SA, 0);
251                         I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
252                 }
253         }
254
255         I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
256         I915_WRITE(ILK_FBC_RT_BASE,
257                    i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
258         /* enable it... */
259         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
260
261         intel_fbc_recompress(dev_priv);
262 }
263
264 static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
265 {
266         u32 dpfc_ctl;
267
268         /* Disable compression */
269         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
270         if (dpfc_ctl & DPFC_CTL_EN) {
271                 dpfc_ctl &= ~DPFC_CTL_EN;
272                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
273         }
274 }
275
276 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
277 {
278         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
279 }
280
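/*
 * IVB+ FBC activation. Besides the common compression-limit and fence setup
 * this also applies the gen9 stride override (Display WA #0529), the FBC
 * queue workarounds for IVB/HSW/BDW and the gen11+ dummy pixel workaround
 * (Wa_1409120013).
 */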
281 static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
282 {
283         struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
284         u32 dpfc_ctl;
285         int threshold = dev_priv->fbc.threshold;
286
287         /* Display WA #0529: skl, kbl, bxt. */
288         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
289                 u32 val = I915_READ(CHICKEN_MISC_4);
290
291                 val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
292
293                 if (i915_gem_object_get_tiling(params->vma->obj) !=
294                     I915_TILING_X)
295                         val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
296
297                 I915_WRITE(CHICKEN_MISC_4, val);
298         }
299
300         dpfc_ctl = 0;
301         if (IS_IVYBRIDGE(dev_priv))
302                 dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
303
304         if (params->fb.format->cpp[0] == 2)
305                 threshold++;
306
307         switch (threshold) {
308         case 4:
309         case 3:
310                 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
311                 break;
312         case 2:
313                 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
314                 break;
315         case 1:
316                 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
317                 break;
318         }
319
320         if (params->flags & PLANE_HAS_FENCE) {
321                 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
322                 I915_WRITE(SNB_DPFC_CTL_SA,
323                            SNB_CPU_FENCE_ENABLE |
324                            params->vma->fence->id);
325                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
326         } else {
327                 I915_WRITE(SNB_DPFC_CTL_SA, 0);
328                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
329         }
330
331         if (dev_priv->fbc.false_color)
332                 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
333
334         if (IS_IVYBRIDGE(dev_priv)) {
335                 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
336                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
337                            I915_READ(ILK_DISPLAY_CHICKEN1) |
338                            ILK_FBCQ_DIS);
339         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
340                 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
341                 I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
342                            I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
343                            HSW_FBCQ_DIS);
344         }
345
346         if (INTEL_GEN(dev_priv) >= 11)
347                 /* Wa_1409120013:icl,ehl,tgl */
348                 I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
349
350         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
351
352         intel_fbc_recompress(dev_priv);
353 }
354
355 static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
356 {
357         if (INTEL_GEN(dev_priv) >= 5)
358                 return ilk_fbc_is_active(dev_priv);
359         else if (IS_GM45(dev_priv))
360                 return g4x_fbc_is_active(dev_priv);
361         else
362                 return i8xx_fbc_is_active(dev_priv);
363 }
364
365 static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
366 {
367         struct intel_fbc *fbc = &dev_priv->fbc;
368
369         fbc->active = true;
370
371         if (INTEL_GEN(dev_priv) >= 7)
372                 gen7_fbc_activate(dev_priv);
373         else if (INTEL_GEN(dev_priv) >= 5)
374                 ilk_fbc_activate(dev_priv);
375         else if (IS_GM45(dev_priv))
376                 g4x_fbc_activate(dev_priv);
377         else
378                 i8xx_fbc_activate(dev_priv);
379 }
380
381 static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
382 {
383         struct intel_fbc *fbc = &dev_priv->fbc;
384
385         fbc->active = false;
386
387         if (INTEL_GEN(dev_priv) >= 5)
388                 ilk_fbc_deactivate(dev_priv);
389         else if (IS_GM45(dev_priv))
390                 g4x_fbc_deactivate(dev_priv);
391         else
392                 i8xx_fbc_deactivate(dev_priv);
393 }
394
395 /**
396  * intel_fbc_is_active - Is FBC active?
397  * @dev_priv: i915 device instance
398  *
399  * This function is used to verify the current state of FBC.
400  *
401  * FIXME: This should be tracked in the plane config eventually
402  * instead of queried at runtime for most callers.
403  */
404 bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
405 {
406         return dev_priv->fbc.active;
407 }
408
409 static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
410                                  const char *reason)
411 {
412         struct intel_fbc *fbc = &dev_priv->fbc;
413
414         WARN_ON(!mutex_is_locked(&fbc->lock));
415
416         if (fbc->active)
417                 intel_fbc_hw_deactivate(dev_priv);
418
419         fbc->no_fbc_reason = reason;
420 }
421
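/*
 * On the old platforms that can't compress with more than one pipe active
 * (gen2/3), keep a mask of pipes with a visible plane up to date and report
 * whether @crtc is currently the only visible one.
 */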
422 static bool multiple_pipes_ok(struct intel_crtc *crtc,
423                               const struct intel_plane_state *plane_state)
424 {
425         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
426         struct intel_fbc *fbc = &dev_priv->fbc;
427         enum pipe pipe = crtc->pipe;
428
429         /* Don't even bother tracking anything we don't need. */
430         if (!no_fbc_on_multiple_pipes(dev_priv))
431                 return true;
432
433         if (plane_state->uapi.visible)
434                 fbc->visible_pipes_mask |= (1 << pipe);
435         else
436                 fbc->visible_pipes_mask &= ~(1 << pipe);
437
438         return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
439 }
440
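/*
 * Reserve stolen memory for the CFB. Returns 0 on failure, otherwise the
 * compression threshold (1, 2 or 4) the allocation succeeded with: 1 means
 * the CFB covers the full framebuffer size, 2 and 4 mean only 1/2 or 1/4
 * of it could be reserved and the hardware must limit the compression
 * ratio accordingly (see the DPFC_CTL_LIMIT_* selection on activation).
 */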
441 static int find_compression_threshold(struct drm_i915_private *dev_priv,
442                                       struct drm_mm_node *node,
443                                       int size,
444                                       int fb_cpp)
445 {
446         int compression_threshold = 1;
447         int ret;
448         u64 end;
449
450         /* The FBC hardware for BDW/SKL doesn't have access to the stolen
451          * reserved range size, so it always assumes the maximum (8MB) is used.
452          * If we enable FBC using a CFB on that memory range we'll get FIFO
453          * underruns, even if that range is not reserved by the BIOS. */
454         if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
455                 end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
456         else
457                 end = U64_MAX;
458
459         /* HACK: This code depends on what we will do in *_enable_fbc. If that
460          * code changes, this code needs to change as well.
461          *
462  * The enable_fbc code will attempt to use one of our two compression
463  * thresholds, so in that case we only have one resort left.
464          */
465
466         /* Try to over-allocate to reduce reallocations and fragmentation. */
467         ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
468                                                    4096, 0, end);
469         if (ret == 0)
470                 return compression_threshold;
471
472 again:
473         /* HW's ability to limit the CFB is 1:4 */
474         if (compression_threshold > 4 ||
475             (fb_cpp == 2 && compression_threshold == 2))
476                 return 0;
477
478         ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
479                                                    4096, 0, end);
480         if (ret && INTEL_GEN(dev_priv) <= 4) {
481                 return 0;
482         } else if (ret) {
483                 compression_threshold <<= 1;
484                 goto again;
485         } else {
486                 return compression_threshold;
487         }
488 }
489
490 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
491 {
492         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
493         struct intel_fbc *fbc = &dev_priv->fbc;
494         struct drm_mm_node *uninitialized_var(compressed_llb);
495         int size, fb_cpp, ret;
496
497         WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
498
499         size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
500         fb_cpp = fbc->state_cache.fb.format->cpp[0];
501
502         ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
503                                          size, fb_cpp);
504         if (!ret)
505                 goto err_llb;
506         else if (ret > 1) {
507                 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a full-size buffer. Try to increase stolen memory size if available in BIOS.\n");
508
509         }
510
511         fbc->threshold = ret;
512
513         if (INTEL_GEN(dev_priv) >= 5) {
514                 I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
515         } else if (IS_GM45(dev_priv)) {
516                 I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
517         } else {
518                 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
519                 if (!compressed_llb)
520                         goto err_fb;
521
522                 ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
523                                                   4096, 4096);
524                 if (ret)
525                         goto err_fb;
526
527                 fbc->compressed_llb = compressed_llb;
528
529                 GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
530                                              fbc->compressed_fb.start,
531                                              U32_MAX));
532                 GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
533                                              fbc->compressed_llb->start,
534                                              U32_MAX));
535                 I915_WRITE(FBC_CFB_BASE,
536                            dev_priv->dsm.start + fbc->compressed_fb.start);
537                 I915_WRITE(FBC_LL_BASE,
538                            dev_priv->dsm.start + compressed_llb->start);
539         }
540
541         DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
542                       fbc->compressed_fb.size, fbc->threshold);
543
544         return 0;
545
546 err_fb:
547         kfree(compressed_llb);
548         i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
549 err_llb:
550         if (drm_mm_initialized(&dev_priv->mm.stolen))
551                 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
552         return -ENOSPC;
553 }
554
555 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
556 {
557         struct intel_fbc *fbc = &dev_priv->fbc;
558
559         if (drm_mm_node_allocated(&fbc->compressed_fb))
560                 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
561
562         if (fbc->compressed_llb) {
563                 i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
564                 kfree(fbc->compressed_llb);
565         }
566 }
567
568 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
569 {
570         struct intel_fbc *fbc = &dev_priv->fbc;
571
572         if (!fbc_supported(dev_priv))
573                 return;
574
575         mutex_lock(&fbc->lock);
576         __intel_fbc_cleanup_cfb(dev_priv);
577         mutex_unlock(&fbc->lock);
578 }
579
580 static bool stride_is_valid(struct drm_i915_private *dev_priv,
581                             unsigned int stride)
582 {
583         /* This should have been caught earlier. */
584         if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
585                 return false;
586
587         /* Below are the additional FBC restrictions. */
588         if (stride < 512)
589                 return false;
590
591         if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
592                 return stride == 4096 || stride == 8192;
593
594         if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
595                 return false;
596
597         if (stride > 16384)
598                 return false;
599
600         return true;
601 }
602
603 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
604                                   u32 pixel_format)
605 {
606         switch (pixel_format) {
607         case DRM_FORMAT_XRGB8888:
608         case DRM_FORMAT_XBGR8888:
609                 return true;
610         case DRM_FORMAT_XRGB1555:
611         case DRM_FORMAT_RGB565:
612                 /* 16bpp not supported on gen2 */
613                 if (IS_GEN(dev_priv, 2))
614                         return false;
615                 /* WaFbcOnly1to1Ratio:ctg */
616                 if (IS_G4X(dev_priv))
617                         return false;
618                 return true;
619         default:
620                 return false;
621         }
622 }
623
624 /*
625  * For some reason, the hardware tracking starts looking at whatever we
626  * programmed as the display plane base address register. It does not look at
627  * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
628  * variables instead of just looking at the pipe/plane size.
629  */
630 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
631 {
632         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
633         struct intel_fbc *fbc = &dev_priv->fbc;
634         unsigned int effective_w, effective_h, max_w, max_h;
635
636         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
637                 max_w = 5120;
638                 max_h = 4096;
639         } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
640                 max_w = 4096;
641                 max_h = 4096;
642         } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
643                 max_w = 4096;
644                 max_h = 2048;
645         } else {
646                 max_w = 2048;
647                 max_h = 1536;
648         }
649
650         intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
651                                         &effective_h);
652         effective_w += fbc->state_cache.plane.adjusted_x;
653         effective_h += fbc->state_cache.plane.adjusted_y;
654
655         return effective_w <= max_w && effective_h <= max_h;
656 }
657
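/*
 * Snapshot everything FBC later needs from the new crtc/plane state, so
 * that the activation decision can be made after the atomic commit (e.g.
 * from the frontbuffer flush path) without the atomic state structures.
 */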
658 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
659                                          const struct intel_crtc_state *crtc_state,
660                                          const struct intel_plane_state *plane_state)
661 {
662         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
663         struct intel_fbc *fbc = &dev_priv->fbc;
664         struct intel_fbc_state_cache *cache = &fbc->state_cache;
665         struct drm_framebuffer *fb = plane_state->hw.fb;
666
667         cache->vma = NULL;
668         cache->flags = 0;
669
670         cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
671         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
672                 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
673
674         cache->plane.rotation = plane_state->hw.rotation;
675         /*
676          * Src coordinates are already rotated by 270 degrees for
677          * the 90/270 degree plane rotation cases (to match the
678          * GTT mapping), hence no need to account for rotation here.
679          */
680         cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
681         cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
682         cache->plane.visible = plane_state->uapi.visible;
683         cache->plane.adjusted_x = plane_state->color_plane[0].x;
684         cache->plane.adjusted_y = plane_state->color_plane[0].y;
685         cache->plane.y = plane_state->uapi.src.y1 >> 16;
686
687         cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
688
689         if (!cache->plane.visible)
690                 return;
691
692         cache->fb.format = fb->format;
693         cache->fb.stride = fb->pitches[0];
694
695         cache->vma = plane_state->vma;
696         cache->flags = plane_state->flags;
697         if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
698                 cache->flags &= ~PLANE_HAS_FENCE;
699 }
700
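/*
 * Check whether the cached state allows FBC to be activated on @crtc. On
 * failure fbc->no_fbc_reason records why, so the reason can be inspected
 * later (for example through debugfs).
 */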
701 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
702 {
703         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
704         struct intel_fbc *fbc = &dev_priv->fbc;
705         struct intel_fbc_state_cache *cache = &fbc->state_cache;
706
707         /* We don't need to use a state cache here since this information is
708          * global for all CRTCs.
709          */
710         if (fbc->underrun_detected) {
711                 fbc->no_fbc_reason = "underrun detected";
712                 return false;
713         }
714
715         if (!cache->vma) {
716                 fbc->no_fbc_reason = "primary plane not visible";
717                 return false;
718         }
719
720         if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
721                 fbc->no_fbc_reason = "incompatible mode";
722                 return false;
723         }
724
725         if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
726                 fbc->no_fbc_reason = "mode too large for compression";
727                 return false;
728         }
729
730         /* The use of a CPU fence is mandatory in order to detect writes
731          * by the CPU to the scanout and trigger updates to the FBC.
732          *
733  * Note that it is possible for a tiled surface to be unmappable (and
734  * so have no fence associated with it) due to aperture constraints
735          * at the time of pinning.
736          *
737          * FIXME with 90/270 degree rotation we should use the fence on
738          * the normal GTT view (the rotated view doesn't even have a
739          * fence). Would need changes to the FBC fence Y offset as well.
740  * For now this will effectively disable FBC with 90/270 degree
741          * rotation.
742          */
743         if (!(cache->flags & PLANE_HAS_FENCE)) {
744                 fbc->no_fbc_reason = "framebuffer not tiled or fenced";
745                 return false;
746         }
747         if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
748             cache->plane.rotation != DRM_MODE_ROTATE_0) {
749                 fbc->no_fbc_reason = "rotation unsupported";
750                 return false;
751         }
752
753         if (!stride_is_valid(dev_priv, cache->fb.stride)) {
754                 fbc->no_fbc_reason = "framebuffer stride not supported";
755                 return false;
756         }
757
758         if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
759                 fbc->no_fbc_reason = "pixel format is invalid";
760                 return false;
761         }
762
763         if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
764             cache->fb.format->has_alpha) {
765                 fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
766                 return false;
767         }
768
769         /* WaFbcExceedCdClockThreshold:hsw,bdw */
770         if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
771             cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
772                 fbc->no_fbc_reason = "pixel rate is too big";
773                 return false;
774         }
775
776         /* It is possible for the required CFB size to change without a
777          * crtc->disable + crtc->enable since it is possible to change the
778          * stride without triggering a full modeset. Since we try to
779          * over-allocate the CFB, there's a chance we may keep FBC enabled even
780          * if this happens, but if we exceed the current CFB size we'll have to
781          * disable FBC. Notice that it would be possible to disable FBC, wait
782          * for a frame, free the stolen node, then try to reenable FBC in case
783          * we didn't get any invalidate/deactivate calls, but this would require
784          * a lot of tracking just for a specific case. If we conclude it's an
785          * important case, we can implement it later. */
786         if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
787             fbc->compressed_fb.size * fbc->threshold) {
788                 fbc->no_fbc_reason = "CFB requirements changed";
789                 return false;
790         }
791
792         /*
793          * Work around a problem on GEN9+ HW, where enabling FBC on a plane
794          * having a Y offset that isn't divisible by 4 causes FIFO underrun
795          * and screen flicker.
796          */
797         if (IS_GEN_RANGE(dev_priv, 9, 10) &&
798             (fbc->state_cache.plane.adjusted_y & 3)) {
799                 fbc->no_fbc_reason = "plane Y offset is misaligned";
800                 return false;
801         }
802
803         return true;
804 }
805
806 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
807 {
808         struct intel_fbc *fbc = &dev_priv->fbc;
809
810         if (intel_vgpu_active(dev_priv)) {
811                 fbc->no_fbc_reason = "VGPU is active";
812                 return false;
813         }
814
815         if (!i915_modparams.enable_fbc) {
816                 fbc->no_fbc_reason = "disabled per module param or by default";
817                 return false;
818         }
819
820         if (fbc->underrun_detected) {
821                 fbc->no_fbc_reason = "underrun detected";
822                 return false;
823         }
824
825         return true;
826 }
827
828 static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
829                                      struct intel_fbc_reg_params *params)
830 {
831         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
832         struct intel_fbc *fbc = &dev_priv->fbc;
833         struct intel_fbc_state_cache *cache = &fbc->state_cache;
834
835         /* Since all our fields are integer types, use memset here so the
836          * comparison function can rely on memcmp because the padding will be
837          * zero. */
838         memset(params, 0, sizeof(*params));
839
840         params->vma = cache->vma;
841         params->flags = cache->flags;
842
843         params->crtc.pipe = crtc->pipe;
844         params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
845         params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
846
847         params->fb.format = cache->fb.format;
848         params->fb.stride = cache->fb.stride;
849
850         params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
851
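        /*
         * Display WA #0529: pre-compute the stride override that
         * gen7_fbc_activate() programs into CHICKEN_MISC_4 for framebuffers
         * that are not X-tiled, scaled by the compression limit.
         */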
852         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
853                 params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
854                                                 32 * fbc->threshold) * 8;
855 }
856
857 void intel_fbc_pre_update(struct intel_crtc *crtc,
858                           const struct intel_crtc_state *crtc_state,
859                           const struct intel_plane_state *plane_state)
860 {
861         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
862         struct intel_fbc *fbc = &dev_priv->fbc;
863         const char *reason = "update pending";
864
865         if (!fbc_supported(dev_priv))
866                 return;
867
868         mutex_lock(&fbc->lock);
869
870         if (!multiple_pipes_ok(crtc, plane_state)) {
871                 reason = "more than one pipe active";
872                 goto deactivate;
873         }
874
875         if (!fbc->enabled || fbc->crtc != crtc)
876                 goto unlock;
877
878         intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
879         fbc->flip_pending = true;
880
881 deactivate:
882         intel_fbc_deactivate(dev_priv, reason);
883 unlock:
884         mutex_unlock(&fbc->lock);
885 }
886
887 /**
888  * __intel_fbc_disable - disable FBC
889  * @dev_priv: i915 device instance
890  *
891  * This is the low level function that actually disables FBC. Callers should
892  * grab the FBC lock.
893  */
894 static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
895 {
896         struct intel_fbc *fbc = &dev_priv->fbc;
897         struct intel_crtc *crtc = fbc->crtc;
898
899         WARN_ON(!mutex_is_locked(&fbc->lock));
900         WARN_ON(!fbc->enabled);
901         WARN_ON(fbc->active);
902
903         DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
904
905         __intel_fbc_cleanup_cfb(dev_priv);
906
907         fbc->enabled = false;
908         fbc->crtc = NULL;
909 }
910
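/*
 * Called (with fbc->lock held) once the plane update has reached the
 * hardware: clear flip_pending and either re-activate FBC or record why it
 * stays deactivated.
 */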
911 static void __intel_fbc_post_update(struct intel_crtc *crtc)
912 {
913         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
914         struct intel_fbc *fbc = &dev_priv->fbc;
915
916         WARN_ON(!mutex_is_locked(&fbc->lock));
917
918         if (!fbc->enabled || fbc->crtc != crtc)
919                 return;
920
921         fbc->flip_pending = false;
922         WARN_ON(fbc->active);
923
924         if (!i915_modparams.enable_fbc) {
925                 intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
926                 __intel_fbc_disable(dev_priv);
927
928                 return;
929         }
930
931         intel_fbc_get_reg_params(crtc, &fbc->params);
932
933         if (!intel_fbc_can_activate(crtc))
934                 return;
935
936         if (!fbc->busy_bits) {
937                 intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
938                 intel_fbc_hw_activate(dev_priv);
939         } else
940                 intel_fbc_deactivate(dev_priv, "frontbuffer write");
941 }
942
943 void intel_fbc_post_update(struct intel_crtc *crtc)
944 {
945         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
946         struct intel_fbc *fbc = &dev_priv->fbc;
947
948         if (!fbc_supported(dev_priv))
949                 return;
950
951         mutex_lock(&fbc->lock);
952         __intel_fbc_post_update(crtc);
953         mutex_unlock(&fbc->lock);
954 }
955
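/*
 * Frontbuffer bits FBC cares about: only the primary plane of the FBC CRTC
 * once FBC is enabled, otherwise every plane that could become the FBC
 * plane.
 */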
956 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
957 {
958         if (fbc->enabled)
959                 return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
960         else
961                 return fbc->possible_framebuffer_bits;
962 }
963
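/*
 * Frontbuffer invalidate hook: a frontbuffer write is about to happen, so
 * deactivate FBC until the matching flush. Writes coming through the GTT
 * and page flips are ignored here.
 */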
964 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
965                           unsigned int frontbuffer_bits,
966                           enum fb_op_origin origin)
967 {
968         struct intel_fbc *fbc = &dev_priv->fbc;
969
970         if (!fbc_supported(dev_priv))
971                 return;
972
973         if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
974                 return;
975
976         mutex_lock(&fbc->lock);
977
978         fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
979
980         if (fbc->enabled && fbc->busy_bits)
981                 intel_fbc_deactivate(dev_priv, "frontbuffer write");
982
983         mutex_unlock(&fbc->lock);
984 }
985
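/*
 * Frontbuffer flush hook: the write has completed, so if no busy bits
 * remain either force a recompression (if FBC is still active) or try to
 * activate it again via __intel_fbc_post_update().
 */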
986 void intel_fbc_flush(struct drm_i915_private *dev_priv,
987                      unsigned int frontbuffer_bits, enum fb_op_origin origin)
988 {
989         struct intel_fbc *fbc = &dev_priv->fbc;
990
991         if (!fbc_supported(dev_priv))
992                 return;
993
994         mutex_lock(&fbc->lock);
995
996         fbc->busy_bits &= ~frontbuffer_bits;
997
998         if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
999                 goto out;
1000
1001         if (!fbc->busy_bits && fbc->enabled &&
1002             (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1003                 if (fbc->active)
1004                         intel_fbc_recompress(dev_priv);
1005                 else if (!fbc->flip_pending)
1006                         __intel_fbc_post_update(fbc->crtc);
1007         }
1008
1009 out:
1010         mutex_unlock(&fbc->lock);
1011 }
1012
1013 /**
1014  * intel_fbc_choose_crtc - select a CRTC to enable FBC on
1015  * @dev_priv: i915 device instance
1016  * @state: the atomic state structure
1017  *
1018  * This function looks at the proposed state for CRTCs and planes, then chooses
1019  * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
1020  * true.
1021  *
1022  * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
1023  * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
1024  */
1025 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1026                            struct intel_atomic_state *state)
1027 {
1028         struct intel_fbc *fbc = &dev_priv->fbc;
1029         struct intel_plane *plane;
1030         struct intel_plane_state *plane_state;
1031         bool crtc_chosen = false;
1032         int i;
1033
1034         mutex_lock(&fbc->lock);
1035
1036         /* Does this atomic commit involve the CRTC currently tied to FBC? */
1037         if (fbc->crtc &&
1038             !intel_atomic_get_new_crtc_state(state, fbc->crtc))
1039                 goto out;
1040
1041         if (!intel_fbc_can_enable(dev_priv))
1042                 goto out;
1043
1044         /* Simply choose the first CRTC that is compatible and has a visible
1045          * plane. We could go for fancier schemes such as checking the plane
1046          * size, but this would just affect the few platforms that don't tie FBC
1047          * to pipe or plane A. */
1048         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1049                 struct intel_crtc_state *crtc_state;
1050                 struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
1051
1052                 if (!plane->has_fbc)
1053                         continue;
1054
1055                 if (!plane_state->uapi.visible)
1056                         continue;
1057
1058                 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1059
1060                 crtc_state->enable_fbc = true;
1061                 crtc_chosen = true;
1062                 break;
1063         }
1064
1065         if (!crtc_chosen)
1066                 fbc->no_fbc_reason = "no suitable CRTC for FBC";
1067
1068 out:
1069         mutex_unlock(&fbc->lock);
1070 }
1071
1072 /**
1073  * intel_fbc_enable: tries to enable FBC on the CRTC
1074  * @crtc: the CRTC
1075  * @crtc_state: corresponding &intel_crtc_state for @crtc
1076  * @plane_state: corresponding &intel_plane_state for the primary plane of @crtc
1077  *
1078  * This function checks if the given CRTC was chosen for FBC, then enables it if
1079  * possible. Notice that it doesn't activate FBC. It is valid to call
1080  * intel_fbc_enable multiple times for the same pipe without an
1081  * intel_fbc_disable in the middle, as long as it is deactivated.
1082  */
1083 void intel_fbc_enable(struct intel_crtc *crtc,
1084                       const struct intel_crtc_state *crtc_state,
1085                       const struct intel_plane_state *plane_state)
1086 {
1087         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1088         struct intel_fbc *fbc = &dev_priv->fbc;
1089
1090         if (!fbc_supported(dev_priv))
1091                 return;
1092
1093         mutex_lock(&fbc->lock);
1094
1095         if (fbc->enabled) {
1096                 WARN_ON(fbc->crtc == NULL);
1097                 if (fbc->crtc == crtc) {
1098                         WARN_ON(!crtc_state->enable_fbc);
1099                         WARN_ON(fbc->active);
1100                 }
1101                 goto out;
1102         }
1103
1104         if (!crtc_state->enable_fbc)
1105                 goto out;
1106
1107         WARN_ON(fbc->active);
1108         WARN_ON(fbc->crtc != NULL);
1109
1110         intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1111         if (intel_fbc_alloc_cfb(crtc)) {
1112                 fbc->no_fbc_reason = "not enough stolen memory";
1113                 goto out;
1114         }
1115
1116         DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1117         fbc->no_fbc_reason = "FBC enabled but not active yet";
1118
1119         fbc->enabled = true;
1120         fbc->crtc = crtc;
1121 out:
1122         mutex_unlock(&fbc->lock);
1123 }
1124
1125 /**
1126  * intel_fbc_disable - disable FBC if it's associated with crtc
1127  * @crtc: the CRTC
1128  *
1129  * This function disables FBC if it's associated with the provided CRTC.
1130  */
1131 void intel_fbc_disable(struct intel_crtc *crtc)
1132 {
1133         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1134         struct intel_fbc *fbc = &dev_priv->fbc;
1135
1136         if (!fbc_supported(dev_priv))
1137                 return;
1138
1139         mutex_lock(&fbc->lock);
1140         if (fbc->crtc == crtc)
1141                 __intel_fbc_disable(dev_priv);
1142         mutex_unlock(&fbc->lock);
1143 }
1144
1145 /**
1146  * intel_fbc_global_disable - globally disable FBC
1147  * @dev_priv: i915 device instance
1148  *
1149  * This function disables FBC regardless of which CRTC is associated with it.
1150  */
1151 void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
1152 {
1153         struct intel_fbc *fbc = &dev_priv->fbc;
1154
1155         if (!fbc_supported(dev_priv))
1156                 return;
1157
1158         mutex_lock(&fbc->lock);
1159         if (fbc->enabled) {
1160                 WARN_ON(fbc->crtc->active);
1161                 __intel_fbc_disable(dev_priv);
1162         }
1163         mutex_unlock(&fbc->lock);
1164 }
1165
1166 static void intel_fbc_underrun_work_fn(struct work_struct *work)
1167 {
1168         struct drm_i915_private *dev_priv =
1169                 container_of(work, struct drm_i915_private, fbc.underrun_work);
1170         struct intel_fbc *fbc = &dev_priv->fbc;
1171
1172         mutex_lock(&fbc->lock);
1173
1174         /* Maybe we were scheduled twice. */
1175         if (fbc->underrun_detected || !fbc->enabled)
1176                 goto out;
1177
1178         DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
1179         fbc->underrun_detected = true;
1180
1181         intel_fbc_deactivate(dev_priv, "FIFO underrun");
1182 out:
1183         mutex_unlock(&fbc->lock);
1184 }
1185
1186 /**
1187  * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1188  * @dev_priv: i915 device instance
1189  *
1190  * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1191  * want to re-enable FBC after an underrun to increase test coverage.
1192  */
1193 int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
1194 {
1195         int ret;
1196
1197         cancel_work_sync(&dev_priv->fbc.underrun_work);
1198
1199         ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
1200         if (ret)
1201                 return ret;
1202
1203         if (dev_priv->fbc.underrun_detected) {
1204                 DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
1205                 dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
1206         }
1207
1208         dev_priv->fbc.underrun_detected = false;
1209         mutex_unlock(&dev_priv->fbc.lock);
1210
1211         return 0;
1212 }
1213
1214 /**
1215  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1216  * @dev_priv: i915 device instance
1217  *
1218  * Without FBC, most underruns are harmless and don't really cause too many
1219  * problems, except for an annoying message on dmesg. With FBC, underruns can
1220  * become black screens or even worse, especially when paired with bad
1221  * watermarks. So in order for us to be on the safe side, completely disable FBC
1222  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1223  * already suggests that watermarks may be bad, so try to be as safe as
1224  * possible.
1225  *
1226  * This function is called from the IRQ handler.
1227  */
1228 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
1229 {
1230         struct intel_fbc *fbc = &dev_priv->fbc;
1231
1232         if (!fbc_supported(dev_priv))
1233                 return;
1234
1235         /* There's no guarantee that underrun_detected won't be set to true
1236          * right after this check and before the work is scheduled, but that's
1237          * not a problem since we'll check it again under the work function
1238          * while FBC is locked. This check here is just to prevent us from
1239          * unnecessarily scheduling the work, and it relies on the fact that we
1240  * never switch underrun_detected back to false after it's true. */
1241         if (READ_ONCE(fbc->underrun_detected))
1242                 return;
1243
1244         schedule_work(&fbc->underrun_work);
1245 }
1246
1247 /**
1248  * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
1249  * @dev_priv: i915 device instance
1250  *
1251  * The FBC code needs to track CRTC visibility since the older platforms can't
1252  * have FBC enabled while multiple pipes are used. This function does the
1253  * initial setup at driver load to make sure FBC is matching the real hardware.
1254  */
1255 void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1256 {
1257         struct intel_crtc *crtc;
1258
1259         /* Don't even bother tracking anything if we don't need to. */
1260         if (!no_fbc_on_multiple_pipes(dev_priv))
1261                 return;
1262
1263         for_each_intel_crtc(&dev_priv->drm, crtc)
1264                 if (intel_crtc_active(crtc) &&
1265                     crtc->base.primary->state->visible)
1266                         dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
1267 }
1268
1269 /*
1270  * The DDX driver changes its behavior depending on the value it reads from
1271  * i915.enable_fbc, so sanitize it by translating the default value into either
1272  * 0 or 1 in order to allow it to know what's going on.
1273  *
1274  * Notice that this is done at driver initialization and we still allow user
1275  * space to change the value during runtime without sanitizing it again. IGT
1276  * relies on being able to change i915.enable_fbc at runtime.
1277  */
1278 static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1279 {
1280         if (i915_modparams.enable_fbc >= 0)
1281                 return !!i915_modparams.enable_fbc;
1282
1283         if (!HAS_FBC(dev_priv))
1284                 return 0;
1285
1286         /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
1287         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
1288                 return 0;
1289
1290         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
1291                 return 1;
1292
1293         return 0;
1294 }
1295
1296 static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1297 {
1298         /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1299         if (intel_vtd_active() &&
1300             (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1301                 DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1302                 return true;
1303         }
1304
1305         return false;
1306 }
1307
1308 /**
1309  * intel_fbc_init - Initialize FBC
1310  * @dev_priv: the i915 device
1311  *
1312  * This function might be called during PM init process.
1313  */
1314 void intel_fbc_init(struct drm_i915_private *dev_priv)
1315 {
1316         struct intel_fbc *fbc = &dev_priv->fbc;
1317
1318         INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1319         mutex_init(&fbc->lock);
1320         fbc->enabled = false;
1321         fbc->active = false;
1322
1323         if (!drm_mm_initialized(&dev_priv->mm.stolen))
1324                 mkwrite_device_info(dev_priv)->display.has_fbc = false;
1325
1326         if (need_fbc_vtd_wa(dev_priv))
1327                 mkwrite_device_info(dev_priv)->display.has_fbc = false;
1328
1329         i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1330         DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
1331                       i915_modparams.enable_fbc);
1332
1333         if (!HAS_FBC(dev_priv)) {
1334                 fbc->no_fbc_reason = "unsupported by this chipset";
1335                 return;
1336         }
1337
1338         /* This value was pulled out of someone's hat */
1339         if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
1340                 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
1341
1342         /* We still don't have any sort of hardware state readout for FBC, so
1343          * deactivate it in case the BIOS activated it to make sure software
1344          * matches the hardware state. */
1345         if (intel_fbc_hw_is_active(dev_priv))
1346                 intel_fbc_hw_deactivate(dev_priv);
1347 }