/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
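
/*
 * Illustrative only: a requested RC6 configuration is expressed as a mask
 * of the flags above, e.g.
 *
 *	int rc6_mode = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * asks for RC6 and deep RC6. The RC6 enable code later in this file
 * translates such a mask into the corresponding RC_CONTROL register bits
 * (GEN6_RC_CTL_RC6_ENABLE and friends).
 */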
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
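
/*
 * Example (illustrative): a desktop Pineview (is_desktop) with DDR3, an
 * 800 MHz FSB and 667 MHz memory matches the {1, 1, 800, 667, ...} row
 * above, so display_sr = 6420 is used as the self-refresh display latency
 * when computing the Pineview watermarks further below.
 */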
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
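
/*
 * e.g. FW_WM(5, SR) expands to ((5 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK),
 * i.e. it positions a watermark value in the SR field of a DSPFW register.
 */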
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self-refresh.
 *
 * Note that enabling CxSR does not guarantee that the system will enter
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
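
/*
 * e.g. VLV_FIFO_START(dsparb, dsparb2, 8, 4) assembles a 9-bit FIFO start
 * (0-511 cachelines) from bits 15:8 of DSPARB (the low 8 bits) and bit 4
 * of DSPARB2 (bit 8 of the result).
 */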
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	uint64_t ret;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
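
/*
 * Worked example (illustrative numbers): a 148.5 MHz pixel clock
 * (pixel_rate = 148500 kHz), 4 bytes per pixel, and a 5 usec memory wakeup
 * latency (latency = 50 in 0.1 usec units) gives
 * 148500 * 4 * 50 / 10000 = 2970 bytes of FIFO needed to ride out the
 * memory wakeup.
 */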
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
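
/*
 * Worked example (illustrative numbers): with the same 148500 kHz clock
 * and 50 (0.1 usec) latency as above, htotal = 2200 gives
 * (50 * 148500) / (2200 * 10000) = 0 whole lines, so the method charges
 * (0 + 1) * width * cpp = 1920 * 4 = 7680 bytes for a 1920 pixel wide,
 * 4 bpp plane.
 */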
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
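
/*
 * Worked example (illustrative numbers): with a 511 cacheline (32704 byte)
 * FIFO, a 960 pixel wide 4 bpp plane needs 960 * 4 * 8 = 30720 bytes for
 * eight whole lines, so the watermark is bumped by 32704 - 30720 = 1984
 * bytes. A 1920 pixel wide plane needs 61440 bytes, eight lines no longer
 * fit, and no adjustment is applied.
 */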
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}
#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}
static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;
	int latency = dev_priv->wm.pri_latency[level] * 10;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = 4;
	else
		cpp = plane_state->base.fb->format->cpp[0];

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	if (plane->id == PLANE_CURSOR)
		width = plane_state->base.crtc_w;
	else
		width = drm_rect_width(&plane_state->base.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(int, wm, USHRT_MAX);
}
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			      plane->base.name,
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}
static int g4x_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &crtc->wm.active.g4x;
	enum plane_id plane_id;

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_crtcs++;
	}

	if (num_active_crtcs != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
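
/*
 * Allocation example (illustrative numbers): with PM2 raw watermarks of
 * 40 (primary) and 20 (sprite0) and only those two planes active,
 * total_rate = 60, so the primary gets 511 * 40 / 60 = 340 entries and
 * sprite0 gets 511 * 20 / 60 = 170. The single leftover entry is then
 * spread by the loop above, ending up as a 341/170 split.
 */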
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
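
/*
 * e.g. a plane with 341 FIFO entries and a raw watermark of 40 entries is
 * programmed as 341 - 40 = 301, since the hardware expects the watermark
 * expressed relative to the FIFO size rather than as the space needed.
 * Raw values larger than the FIFO mark the level as unusable (USHRT_MAX).
 */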
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

 out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			to_intel_crtc_state(crtc->base.state);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
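
/*
 * e.g. VLV_FIFO(SPRITEA, 0xff) builds the SPRITEA field mask, while
 * VLV_FIFO(SPRITEA, sprite0_start) positions a FIFO split point in that
 * field, mirroring how FW_WM() packs watermark fields above.
 */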
1909 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1910 struct intel_crtc_state *crtc_state)
1912 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1913 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1914 const struct vlv_fifo_state *fifo_state =
1915 &crtc_state->wm.vlv.fifo_state;
1916 int sprite0_start, sprite1_start, fifo_size;
1918 if (!crtc_state->fifo_changed)
1921 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1922 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1923 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1925 WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
1926 WARN_ON(fifo_size != 511);
1928 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1931 * uncore.lock serves a double purpose here. It allows us to
1932 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1933 * it protects the DSPARB registers from getting clobbered by
1934 * parallel updates from multiple pipes.
1936 * intel_pipe_update_start() has already disabled interrupts
1937 * for us, so a plain spin_lock() is sufficient here.
1939 spin_lock(&dev_priv->uncore.lock);
1941 switch (crtc->pipe) {
1942 uint32_t dsparb, dsparb2, dsparb3;
1944 dsparb = I915_READ_FW(DSPARB);
1945 dsparb2 = I915_READ_FW(DSPARB2);
1947 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1948 VLV_FIFO(SPRITEB, 0xff));
1949 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1950 VLV_FIFO(SPRITEB, sprite1_start));
1952 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1953 VLV_FIFO(SPRITEB_HI, 0x1));
1954 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1955 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1957 I915_WRITE_FW(DSPARB, dsparb);
1958 I915_WRITE_FW(DSPARB2, dsparb2);
1961 dsparb = I915_READ_FW(DSPARB);
1962 dsparb2 = I915_READ_FW(DSPARB2);
1964 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1965 VLV_FIFO(SPRITED, 0xff));
1966 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1967 VLV_FIFO(SPRITED, sprite1_start));
1969 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1970 VLV_FIFO(SPRITED_HI, 0xff));
1971 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
1972 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
1974 I915_WRITE_FW(DSPARB, dsparb);
1975 I915_WRITE_FW(DSPARB2, dsparb2);
1976 break;
1977 case PIPE_C:
1978 dsparb3 = I915_READ_FW(DSPARB3);
1979 dsparb2 = I915_READ_FW(DSPARB2);
1981 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
1982 VLV_FIFO(SPRITEF, 0xff));
1983 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
1984 VLV_FIFO(SPRITEF, sprite1_start));
1986 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
1987 VLV_FIFO(SPRITEF_HI, 0xff));
1988 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
1989 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
1991 I915_WRITE_FW(DSPARB3, dsparb3);
1992 I915_WRITE_FW(DSPARB2, dsparb2);
1993 break;
1994 default:
1995 break;
1996 }
1998 POSTING_READ_FW(DSPARB);
2000 spin_unlock(&dev_priv->uncore.lock);
2005 static int vlv_compute_intermediate_wm(struct drm_device *dev,
2006 struct intel_crtc *crtc,
2007 struct intel_crtc_state *crtc_state)
2009 struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
2010 const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
2011 const struct vlv_wm_state *active = &crtc->wm.active.vlv;
2012 int level;
2014 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
2015 intermediate->cxsr = optimal->cxsr && active->cxsr &&
2016 !crtc_state->disable_cxsr;
2018 for (level = 0; level < intermediate->num_levels; level++) {
2019 enum plane_id plane_id;
2021 for_each_plane_id_on_crtc(crtc, plane_id) {
2022 intermediate->wm[level].plane[plane_id] =
2023 min(optimal->wm[level].plane[plane_id],
2024 active->wm[level].plane[plane_id]);
2027 intermediate->sr[level].plane = min(optimal->sr[level].plane,
2028 active->sr[level].plane);
2029 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
2030 active->sr[level].cursor);
2031 }
2033 vlv_invalidate_wms(crtc, intermediate, level);
2036 * If our intermediate WM are identical to the final WM, then we can
2037 * omit the post-vblank programming; only update if it's different.
2039 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
2040 crtc_state->wm.need_postvbl_update = true;
2042 return 0;
2043 }
2045 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2046 struct vlv_wm_values *wm)
2048 struct intel_crtc *crtc;
2049 int num_active_crtcs = 0;
2051 wm->level = dev_priv->wm.max_level;
2052 wm->cxsr = true;
2054 for_each_intel_crtc(&dev_priv->drm, crtc) {
2055 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2057 if (!crtc->active)
2058 continue;
2060 if (!wm_state->cxsr)
2061 wm->cxsr = false;
2063 num_active_crtcs++;
2064 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2065 }
2067 if (num_active_crtcs != 1)
2068 wm->cxsr = false;
2070 if (num_active_crtcs > 1)
2071 wm->level = VLV_WM_LEVEL_PM2;
2073 for_each_intel_crtc(&dev_priv->drm, crtc) {
2074 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2075 enum pipe pipe = crtc->pipe;
2077 wm->pipe[pipe] = wm_state->wm[wm->level];
2078 if (crtc->active && wm->cxsr)
2079 wm->sr = wm_state->sr[wm->level];
2081 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2082 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2083 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2084 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2088 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2090 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
2091 struct vlv_wm_values new_wm = {};
2093 vlv_merge_wm(dev_priv, &new_wm);
2095 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2096 return;
2098 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2099 chv_set_memory_dvfs(dev_priv, false);
2101 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2102 chv_set_memory_pm5(dev_priv, false);
2104 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2105 _intel_set_memory_cxsr(dev_priv, false);
2107 vlv_write_wm_values(dev_priv, &new_wm);
2109 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2110 _intel_set_memory_cxsr(dev_priv, true);
2112 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2113 chv_set_memory_pm5(dev_priv, true);
2115 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2116 chv_set_memory_dvfs(dev_priv, true);
2118 *old_wm = new_wm;
2119 }
2121 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2122 struct intel_crtc_state *crtc_state)
2124 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2125 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2127 mutex_lock(&dev_priv->wm.wm_mutex);
2128 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2129 vlv_program_watermarks(dev_priv);
2130 mutex_unlock(&dev_priv->wm.wm_mutex);
2133 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2134 struct intel_crtc_state *crtc_state)
2136 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2139 if (!crtc_state->wm.need_postvbl_update)
2140 return;
2142 mutex_lock(&dev_priv->wm.wm_mutex);
2143 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2144 vlv_program_watermarks(dev_priv);
2145 mutex_unlock(&dev_priv->wm.wm_mutex);
2148 static void i965_update_wm(struct intel_crtc *unused_crtc)
2150 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2151 struct intel_crtc *crtc;
2152 int srwm = 1;
2153 int cursor_sr = 16;
2154 bool cxsr_enabled;
2156 /* Calc sr entries for one plane configs */
2157 crtc = single_enabled_crtc(dev_priv);
2158 if (crtc) {
2159 /* self-refresh has much higher latency */
2160 static const int sr_latency_ns = 12000;
2161 const struct drm_display_mode *adjusted_mode =
2162 &crtc->config->base.adjusted_mode;
2163 const struct drm_framebuffer *fb =
2164 crtc->base.primary->state->fb;
2165 int clock = adjusted_mode->crtc_clock;
2166 int htotal = adjusted_mode->crtc_htotal;
2167 int hdisplay = crtc->config->pipe_src_w;
2168 int cpp = fb->format->cpp[0];
2169 int entries;
2171 entries = intel_wm_method2(clock, htotal,
2172 hdisplay, cpp, sr_latency_ns / 100);
2173 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2174 srwm = I965_FIFO_SIZE - entries;
2175 if (srwm < 0)
2176 srwm = 1;
2177 srwm &= 0x1ff;
2178 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
2179 entries, srwm);
2181 entries = intel_wm_method2(clock, htotal,
2182 crtc->base.cursor->state->crtc_w, 4,
2183 sr_latency_ns / 100);
2184 entries = DIV_ROUND_UP(entries,
2185 i965_cursor_wm_info.cacheline_size) +
2186 i965_cursor_wm_info.guard_size;
2188 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2189 if (cursor_sr > i965_cursor_wm_info.max_wm)
2190 cursor_sr = i965_cursor_wm_info.max_wm;
2192 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
2193 "cursor %d\n", srwm, cursor_sr);
2195 cxsr_enabled = true;
2196 } else {
2197 cxsr_enabled = false;
2198 /* Turn off self refresh if both pipes are enabled */
2199 intel_set_memory_cxsr(dev_priv, false);
2200 }
2202 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2203 srwm);
2205 /* 965 has limitations... */
2206 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
2207 FW_WM(8, CURSORB) |
2208 FW_WM(8, PLANEB) |
2209 FW_WM(8, PLANEA));
2210 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
2211 FW_WM(8, PLANEC_OLD));
2212 /* update cursor SR watermark */
2213 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
2215 if (cxsr_enabled)
2216 intel_set_memory_cxsr(dev_priv, true);
2221 static void i9xx_update_wm(struct intel_crtc *unused_crtc)
2223 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2224 const struct intel_watermark_params *wm_info;
2225 uint32_t fwater_lo;
2226 uint32_t fwater_hi;
2227 int cwm, srwm = 1;
2228 int fifo_size;
2229 int planea_wm, planeb_wm;
2230 struct intel_crtc *crtc, *enabled = NULL;
2232 if (IS_I945GM(dev_priv))
2233 wm_info = &i945_wm_info;
2234 else if (!IS_GEN2(dev_priv))
2235 wm_info = &i915_wm_info;
2236 else
2237 wm_info = &i830_a_wm_info;
2239 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
2240 crtc = intel_get_crtc_for_plane(dev_priv, 0);
2241 if (intel_crtc_active(crtc)) {
2242 const struct drm_display_mode *adjusted_mode =
2243 &crtc->config->base.adjusted_mode;
2244 const struct drm_framebuffer *fb =
2245 crtc->base.primary->state->fb;
2246 int cpp;
2248 if (IS_GEN2(dev_priv))
2249 cpp = 4;
2250 else
2251 cpp = fb->format->cpp[0];
2253 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2254 wm_info, fifo_size, cpp,
2255 pessimal_latency_ns);
2256 enabled = crtc;
2257 } else {
2258 planea_wm = fifo_size - wm_info->guard_size;
2259 if (planea_wm > (long)wm_info->max_wm)
2260 planea_wm = wm_info->max_wm;
2261 }
2263 if (IS_GEN2(dev_priv))
2264 wm_info = &i830_bc_wm_info;
2266 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
2267 crtc = intel_get_crtc_for_plane(dev_priv, 1);
2268 if (intel_crtc_active(crtc)) {
2269 const struct drm_display_mode *adjusted_mode =
2270 &crtc->config->base.adjusted_mode;
2271 const struct drm_framebuffer *fb =
2272 crtc->base.primary->state->fb;
2273 int cpp;
2275 if (IS_GEN2(dev_priv))
2276 cpp = 4;
2277 else
2278 cpp = fb->format->cpp[0];
2280 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2281 wm_info, fifo_size, cpp,
2282 pessimal_latency_ns);
2283 if (enabled == NULL)
2284 enabled = crtc;
2285 else
2286 enabled = NULL;
2287 } else {
2288 planeb_wm = fifo_size - wm_info->guard_size;
2289 if (planeb_wm > (long)wm_info->max_wm)
2290 planeb_wm = wm_info->max_wm;
2291 }
2293 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2295 if (IS_I915GM(dev_priv) && enabled) {
2296 struct drm_i915_gem_object *obj;
2298 obj = intel_fb_obj(enabled->base.primary->state->fb);
2300 /* self-refresh seems busted with untiled */
2301 if (!i915_gem_object_is_tiled(obj))
2302 enabled = NULL;
2303 }
2305 /*
2306 * Overlay gets an aggressive default since video jitter is bad.
2307 */
2308 cwm = 2;
2310 /* Play safe and disable self-refresh before adjusting watermarks. */
2311 intel_set_memory_cxsr(dev_priv, false);
2313 /* Calc sr entries for one plane configs */
2314 if (HAS_FW_BLC(dev_priv) && enabled) {
2315 /* self-refresh has much higher latency */
2316 static const int sr_latency_ns = 6000;
2317 const struct drm_display_mode *adjusted_mode =
2318 &enabled->config->base.adjusted_mode;
2319 const struct drm_framebuffer *fb =
2320 enabled->base.primary->state->fb;
2321 int clock = adjusted_mode->crtc_clock;
2322 int htotal = adjusted_mode->crtc_htotal;
2323 int hdisplay = enabled->config->pipe_src_w;
2324 int cpp;
2325 int entries;
2327 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2328 cpp = 4;
2329 else
2330 cpp = fb->format->cpp[0];
2332 entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
2333 sr_latency_ns / 100);
2334 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2335 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
2336 srwm = wm_info->fifo_size - entries;
2337 if (srwm < 0)
2338 srwm = 1;
2340 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2341 I915_WRITE(FW_BLC_SELF,
2342 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2343 else
2344 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2345 }
2347 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2348 planea_wm, planeb_wm, cwm, srwm);
2350 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2351 fwater_hi = (cwm & 0x1f);
2353 /* Set request length to 8 cachelines per fetch */
2354 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2355 fwater_hi = fwater_hi | (1 << 8);
2357 I915_WRITE(FW_BLC, fwater_lo);
2358 I915_WRITE(FW_BLC2, fwater_hi);
2360 if (enabled)
2361 intel_set_memory_cxsr(dev_priv, true);
2364 static void i845_update_wm(struct intel_crtc *unused_crtc)
2366 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
2367 struct intel_crtc *crtc;
2368 const struct drm_display_mode *adjusted_mode;
2369 uint32_t fwater_lo;
2370 int planea_wm;
2372 crtc = single_enabled_crtc(dev_priv);
2373 if (crtc == NULL)
2374 return;
2376 adjusted_mode = &crtc->config->base.adjusted_mode;
2377 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
2378 &i845_wm_info,
2379 dev_priv->display.get_fifo_size(dev_priv, 0),
2380 4, pessimal_latency_ns);
2381 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
2382 fwater_lo |= (3<<8) | planea_wm;
2384 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2386 I915_WRITE(FW_BLC, fwater_lo);
2389 /* latency must be in 0.1us units. */
2390 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2391 unsigned int cpp,
2392 unsigned int latency)
2393 {
2394 unsigned int ret;
2396 ret = intel_wm_method1(pixel_rate, cpp, latency);
2397 ret = DIV_ROUND_UP(ret, 64) + 2;
2399 return ret;
2400 }
2402 /* latency must be in 0.1us units. */
2403 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2404 unsigned int htotal,
2405 unsigned int width,
2406 unsigned int cpp,
2407 unsigned int latency)
2408 {
2409 unsigned int ret;
2411 ret = intel_wm_method2(pixel_rate, htotal,
2412 width, cpp, latency);
2413 ret = DIV_ROUND_UP(ret, 64) + 2;
2415 return ret;
2416 }
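/*
 * A worked example with illustrative values (not from the spec): if
 * intel_wm_method1() estimated 2970 bytes in flight during the latency
 * window, the watermark becomes DIV_ROUND_UP(2970, 64) + 2 = 47 + 2 = 49
 * FIFO entries, i.e. the byte estimate rounded up to 64-byte cachelines
 * plus a two-entry guard.
 */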
2418 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2419 uint8_t cpp)
2420 {
2421 /*
2422 * Neither of these should be possible since this function shouldn't be
2423 * called if the CRTC is off or the plane is invisible. But let's be
2424 * extra paranoid to avoid a potential divide-by-zero if we screw up
2425 * elsewhere in the driver.
2426 */
2427 if (WARN_ON(!cpp))
2428 return 0;
2429 if (WARN_ON(!horiz_pixels))
2430 return 0;
2432 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2433 }
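/*
 * Worked example (illustrative numbers, not from the spec): with
 * pri_val = 60 entries, horiz_pixels = 1920 and cpp = 4, one line is
 * 1920 * 4 = 7680 bytes while the primary WM covers 60 * 64 = 3840 bytes,
 * so the FBC WM is DIV_ROUND_UP(3840, 7680) + 2 = 1 + 2 = 3 lines.
 */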
2435 struct ilk_wm_maximums {
2436 uint16_t pri;
2437 uint16_t spr;
2438 uint16_t cur;
2439 uint16_t fbc;
2440 };
2442 /*
2443 * For both WM_PIPE and WM_LP.
2444 * mem_value must be in 0.1us units.
2445 */
2446 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2447 const struct intel_plane_state *pstate,
2448 uint32_t mem_value,
2449 bool is_lp)
2450 {
2451 uint32_t method1, method2;
2452 int cpp;
2454 if (!intel_wm_plane_visible(cstate, pstate))
2455 return 0;
2457 cpp = pstate->base.fb->format->cpp[0];
2459 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2461 if (!is_lp)
2462 return method1;
2464 method2 = ilk_wm_method2(cstate->pixel_rate,
2465 cstate->base.adjusted_mode.crtc_htotal,
2466 drm_rect_width(&pstate->base.dst),
2467 cpp, mem_value);
2469 return min(method1, method2);
2470 }
2472 /*
2473 * For both WM_PIPE and WM_LP.
2474 * mem_value must be in 0.1us units.
2475 */
2476 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2477 const struct intel_plane_state *pstate,
2480 uint32_t method1, method2;
2483 if (!intel_wm_plane_visible(cstate, pstate))
2484 return 0;
2486 cpp = pstate->base.fb->format->cpp[0];
2488 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2489 method2 = ilk_wm_method2(cstate->pixel_rate,
2490 cstate->base.adjusted_mode.crtc_htotal,
2491 drm_rect_width(&pstate->base.dst),
2492 cpp, mem_value);
2493 return min(method1, method2);
2494 }
2496 /*
2497 * For both WM_PIPE and WM_LP.
2498 * mem_value must be in 0.1us units.
2499 */
2500 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2501 const struct intel_plane_state *pstate,
2502 uint32_t mem_value)
2503 {
2504 int cpp;
2506 if (!intel_wm_plane_visible(cstate, pstate))
2509 cpp = pstate->base.fb->format->cpp[0];
2511 return ilk_wm_method2(cstate->pixel_rate,
2512 cstate->base.adjusted_mode.crtc_htotal,
2513 pstate->base.crtc_w, cpp, mem_value);
2514 }
2516 /* Only for WM_LP. */
2517 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2518 const struct intel_plane_state *pstate,
2519 uint32_t pri_val)
2520 {
2521 int cpp;
2523 if (!intel_wm_plane_visible(cstate, pstate))
2524 return 0;
2526 cpp = pstate->base.fb->format->cpp[0];
2528 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2529 }
2531 static unsigned int
2532 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2534 if (INTEL_GEN(dev_priv) >= 8)
2535 return 3072;
2536 else if (INTEL_GEN(dev_priv) >= 7)
2537 return 768;
2538 else
2539 return 512;
2540 }
2542 static unsigned int
2543 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2544 int level, bool is_sprite)
2546 if (INTEL_GEN(dev_priv) >= 8)
2547 /* BDW primary/sprite plane watermarks */
2548 return level == 0 ? 255 : 2047;
2549 else if (INTEL_GEN(dev_priv) >= 7)
2550 /* IVB/HSW primary/sprite plane watermarks */
2551 return level == 0 ? 127 : 1023;
2552 else if (!is_sprite)
2553 /* ILK/SNB primary plane watermarks */
2554 return level == 0 ? 127 : 511;
2556 /* ILK/SNB sprite plane watermarks */
2557 return level == 0 ? 63 : 255;
2561 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2563 if (INTEL_GEN(dev_priv) >= 7)
2564 return level == 0 ? 63 : 255;
2566 return level == 0 ? 31 : 63;
2569 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2571 if (INTEL_GEN(dev_priv) >= 8)
2577 /* Calculate the maximum primary/sprite plane watermark */
2578 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2580 const struct intel_wm_config *config,
2581 enum intel_ddb_partitioning ddb_partitioning,
2584 struct drm_i915_private *dev_priv = to_i915(dev);
2585 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2587 /* if sprites aren't enabled, sprites get nothing */
2588 if (is_sprite && !config->sprites_enabled)
2591 /* HSW allows LP1+ watermarks even with multiple pipes */
2592 if (level == 0 || config->num_pipes_active > 1) {
2593 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
2596 * For some reason the non self refresh
2597 * FIFO size is only half of the self
2598 * refresh FIFO size on ILK/SNB.
2600 if (INTEL_GEN(dev_priv) <= 6)
2604 if (config->sprites_enabled) {
2605 /* level 0 is always calculated with 1:1 split */
2606 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2615 /* clamp to max that the registers can hold */
2616 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2619 /* Calculate the maximum cursor plane watermark */
2620 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2622 const struct intel_wm_config *config)
2624 /* HSW LP1+ watermarks w/ multiple pipes */
2625 if (level > 0 && config->num_pipes_active > 1)
2628 /* otherwise just report max that registers can hold */
2629 return ilk_cursor_wm_reg_max(to_i915(dev), level);
2632 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2634 const struct intel_wm_config *config,
2635 enum intel_ddb_partitioning ddb_partitioning,
2636 struct ilk_wm_maximums *max)
2638 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2639 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2640 max->cur = ilk_cursor_wm_max(dev, level, config);
2641 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
2644 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2646 struct ilk_wm_maximums *max)
2648 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2649 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2650 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2651 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2654 static bool ilk_validate_wm_level(int level,
2655 const struct ilk_wm_maximums *max,
2656 struct intel_wm_level *result)
2660 /* already determined to be invalid? */
2661 if (!result->enable)
2662 return false;
2664 result->enable = result->pri_val <= max->pri &&
2665 result->spr_val <= max->spr &&
2666 result->cur_val <= max->cur;
2668 ret = result->enable;
2671 * HACK until we can pre-compute everything,
2672 * and thus fail gracefully if LP0 watermarks
2675 if (level == 0 && !result->enable) {
2676 if (result->pri_val > max->pri)
2677 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2678 level, result->pri_val, max->pri);
2679 if (result->spr_val > max->spr)
2680 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2681 level, result->spr_val, max->spr);
2682 if (result->cur_val > max->cur)
2683 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2684 level, result->cur_val, max->cur);
2686 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2687 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2688 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2689 result->enable = true;
2695 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2696 const struct intel_crtc *intel_crtc,
2698 struct intel_crtc_state *cstate,
2699 struct intel_plane_state *pristate,
2700 struct intel_plane_state *sprstate,
2701 struct intel_plane_state *curstate,
2702 struct intel_wm_level *result)
2704 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2705 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2706 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2708 /* WM1+ latency values stored in 0.5us units */
2709 if (level > 0) {
2710 pri_latency *= 5;
2711 spr_latency *= 5;
2712 cur_latency *= 5;
2713 }
2715 if (pristate) {
2716 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2717 pri_latency, level);
2718 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2719 }
2721 if (sprstate)
2722 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2724 if (curstate)
2725 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2727 result->enable = true;
2728 }
2730 static uint32_t
2731 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2733 const struct intel_atomic_state *intel_state =
2734 to_intel_atomic_state(cstate->base.state);
2735 const struct drm_display_mode *adjusted_mode =
2736 &cstate->base.adjusted_mode;
2737 u32 linetime, ips_linetime;
2739 if (!cstate->base.active)
2740 return 0;
2741 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2742 return 0;
2743 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2744 return 0;
2746 /* The watermarks are computed based on how long it takes to fill a
2747 * single row at the given clock rate, multiplied by 8.
2748 */
2749 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2750 adjusted_mode->crtc_clock);
2751 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2752 intel_state->cdclk.logical.cdclk);
2754 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2755 PIPE_WM_LINETIME_TIME(linetime);
2756 }
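/*
 * Worked example with assumed 1080p timings: crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz give a line time of roughly 14.8 us, so
 * linetime = DIV_ROUND_CLOSEST(2200 * 8000, 148500) = 119 in 0.125 us
 * units; ips_linetime is the same calculation done against the logical
 * CDCLK instead of the pixel clock.
 */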
2758 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2761 if (IS_GEN9(dev_priv)) {
2764 int level, max_level = ilk_wm_max_level(dev_priv);
2766 /* read the first set of memory latencies[0:3] */
2767 val = 0; /* data0 to be programmed to 0 for first set */
2768 mutex_lock(&dev_priv->rps.hw_lock);
2769 ret = sandybridge_pcode_read(dev_priv,
2770 GEN9_PCODE_READ_MEM_LATENCY,
2772 mutex_unlock(&dev_priv->rps.hw_lock);
2775 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2779 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2780 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2781 GEN9_MEM_LATENCY_LEVEL_MASK;
2782 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2783 GEN9_MEM_LATENCY_LEVEL_MASK;
2784 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2785 GEN9_MEM_LATENCY_LEVEL_MASK;
2787 /* read the second set of memory latencies[4:7] */
2788 val = 1; /* data0 to be programmed to 1 for second set */
2789 mutex_lock(&dev_priv->rps.hw_lock);
2790 ret = sandybridge_pcode_read(dev_priv,
2791 GEN9_PCODE_READ_MEM_LATENCY,
2793 mutex_unlock(&dev_priv->rps.hw_lock);
2795 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2799 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2800 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2801 GEN9_MEM_LATENCY_LEVEL_MASK;
2802 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2803 GEN9_MEM_LATENCY_LEVEL_MASK;
2804 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2805 GEN9_MEM_LATENCY_LEVEL_MASK;
2808 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2809 * need to be disabled. We make sure to sanitize the values out
2810 * of the punit to satisfy this requirement.
2812 for (level = 1; level <= max_level; level++) {
2813 if (wm[level] == 0) {
2814 for (i = level + 1; i <= max_level; i++)
2821 * WaWmMemoryReadLatency:skl,glk
2823 * punit doesn't take into account the read latency so we need
2824 * to add 2us to the various latency levels we retrieve from the
2825 * punit when level 0 response data is 0us.
2826 */
2827 if (wm[0] == 0) {
2828 wm[0] += 2;
2829 for (level = 1; level <= max_level; level++) {
2836 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2837 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2839 wm[0] = (sskpd >> 56) & 0xFF;
2841 wm[0] = sskpd & 0xF;
2842 wm[1] = (sskpd >> 4) & 0xFF;
2843 wm[2] = (sskpd >> 12) & 0xFF;
2844 wm[3] = (sskpd >> 20) & 0x1FF;
2845 wm[4] = (sskpd >> 32) & 0x1FF;
2846 } else if (INTEL_GEN(dev_priv) >= 6) {
2847 uint32_t sskpd = I915_READ(MCH_SSKPD);
2849 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2850 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2851 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2852 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2853 } else if (INTEL_GEN(dev_priv) >= 5) {
2854 uint32_t mltr = I915_READ(MLTR_ILK);
2856 /* ILK primary LP0 latency is 700 ns */
2858 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2859 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2863 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2866 /* ILK sprite LP0 latency is 1300 ns */
2867 if (IS_GEN5(dev_priv))
2871 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2874 /* ILK cursor LP0 latency is 1300 ns */
2875 if (IS_GEN5(dev_priv))
2878 /* WaDoubleCursorLP3Latency:ivb */
2879 if (IS_IVYBRIDGE(dev_priv))
2883 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2885 /* how many WM levels are we expecting */
2886 if (INTEL_GEN(dev_priv) >= 9)
2887 return 7;
2888 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2889 return 4;
2890 else if (INTEL_GEN(dev_priv) >= 6)
2891 return 3;
2892 else
2893 return 2;
2894 }
2896 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2898 const uint16_t wm[8])
2900 int level, max_level = ilk_wm_max_level(dev_priv);
2902 for (level = 0; level <= max_level; level++) {
2903 unsigned int latency = wm[level];
2905 if (latency == 0) {
2906 DRM_ERROR("%s WM%d latency not provided\n",
2907 name, level);
2908 continue;
2909 }
2911 /*
2912 * - latencies are in us on gen9.
2913 * - before then, WM1+ latency values are in 0.5us units
2914 */
2915 if (IS_GEN9(dev_priv))
2916 latency *= 10;
2917 else if (level > 0)
2918 latency *= 5;
2920 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2921 name, level, wm[level],
2922 latency / 10, latency % 10);
2926 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2927 uint16_t wm[5], uint16_t min)
2929 int level, max_level = ilk_wm_max_level(dev_priv);
2931 if (wm[0] >= min)
2932 return false;
2934 wm[0] = max(wm[0], min);
2935 for (level = 1; level <= max_level; level++)
2936 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2938 return true;
2939 }
2941 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2942 {
2943 bool changed;
2945 /*
2946 * The BIOS provided WM memory latency values are often
2947 * inadequate for high resolution displays. Adjust them.
2948 */
2949 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2950 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2951 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2953 if (!changed)
2954 return;
2956 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2957 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2958 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2959 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2962 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2964 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
2966 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2967 sizeof(dev_priv->wm.pri_latency));
2968 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2969 sizeof(dev_priv->wm.pri_latency));
2971 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
2972 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
2974 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2975 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2976 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2978 if (IS_GEN6(dev_priv))
2979 snb_wm_latency_quirk(dev_priv);
2980 }
2982 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
2984 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
2985 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
2988 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2989 struct intel_pipe_wm *pipe_wm)
2991 /* LP0 watermark maximums depend on this pipe alone */
2992 const struct intel_wm_config config = {
2993 .num_pipes_active = 1,
2994 .sprites_enabled = pipe_wm->sprites_enabled,
2995 .sprites_scaled = pipe_wm->sprites_scaled,
2997 struct ilk_wm_maximums max;
2999 /* LP0 watermarks always use 1/2 DDB partitioning */
3000 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
3002 /* At least LP0 must be valid */
3003 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
3004 DRM_DEBUG_KMS("LP0 watermark invalid\n");
3011 /* Compute new watermarks for the pipe */
3012 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
3014 struct drm_atomic_state *state = cstate->base.state;
3015 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3016 struct intel_pipe_wm *pipe_wm;
3017 struct drm_device *dev = state->dev;
3018 const struct drm_i915_private *dev_priv = to_i915(dev);
3019 struct intel_plane *intel_plane;
3020 struct intel_plane_state *pristate = NULL;
3021 struct intel_plane_state *sprstate = NULL;
3022 struct intel_plane_state *curstate = NULL;
3023 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
3024 struct ilk_wm_maximums max;
3026 pipe_wm = &cstate->wm.ilk.optimal;
3028 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3029 struct intel_plane_state *ps;
3031 ps = intel_atomic_get_existing_plane_state(state,
3032 &intel_plane->base);
3033 if (!ps)
3034 continue;
3036 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
3037 pristate = ps;
3038 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
3039 sprstate = ps;
3040 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
3041 curstate = ps;
3042 }
3044 pipe_wm->pipe_enabled = cstate->base.active;
3045 if (sprstate) {
3046 pipe_wm->sprites_enabled = sprstate->base.visible;
3047 pipe_wm->sprites_scaled = sprstate->base.visible &&
3048 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
3049 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
3050 }
3052 usable_level = max_level;
3054 /* ILK/SNB: LP2+ watermarks only w/o sprites */
3055 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
3056 usable_level = 1;
3058 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
3059 if (pipe_wm->sprites_scaled)
3060 usable_level = 0;
3062 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
3063 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
3065 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
3066 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
3068 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3069 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
3071 if (!ilk_validate_pipe_wm(dev, pipe_wm))
3072 return -EINVAL;
3074 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
3076 for (level = 1; level <= max_level; level++) {
3077 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
3079 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
3080 pristate, sprstate, curstate, wm);
3083 * Disable any watermark level that exceeds the
3084 * register maximums since such watermarks are
3087 if (level > usable_level)
3088 continue;
3090 if (ilk_validate_wm_level(level, &max, wm))
3091 pipe_wm->wm[level] = *wm;
3092 else
3093 usable_level = level;
3094 }
3096 return 0;
3097 }
3099 /*
3100 * Build a set of 'intermediate' watermark values that satisfy both the old
3101 * state and the new state. These can be programmed to the hardware
3102 * immediately.
3103 */
3104 static int ilk_compute_intermediate_wm(struct drm_device *dev,
3105 struct intel_crtc *intel_crtc,
3106 struct intel_crtc_state *newstate)
3108 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
3109 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
3110 int level, max_level = ilk_wm_max_level(to_i915(dev));
3113 * Start with the final, target watermarks, then combine with the
3114 * currently active watermarks to get values that are safe both before
3115 * and after the vblank.
3117 *a = newstate->wm.ilk.optimal;
3118 a->pipe_enabled |= b->pipe_enabled;
3119 a->sprites_enabled |= b->sprites_enabled;
3120 a->sprites_scaled |= b->sprites_scaled;
3122 for (level = 0; level <= max_level; level++) {
3123 struct intel_wm_level *a_wm = &a->wm[level];
3124 const struct intel_wm_level *b_wm = &b->wm[level];
3126 a_wm->enable &= b_wm->enable;
3127 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
3128 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
3129 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
3130 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
3134 * We need to make sure that these merged watermark values are
3135 * actually a valid configuration themselves. If they're not,
3136 * there's no safe way to transition from the old state to
3137 * the new state, so we need to fail the atomic transaction.
3139 if (!ilk_validate_pipe_wm(dev, a))
3140 return -EINVAL;
3143 * If our intermediate WM are identical to the final WM, then we can
3144 * omit the post-vblank programming; only update if it's different.
3146 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
3147 newstate->wm.need_postvbl_update = true;
3149 return 0;
3150 }
3152 /*
3153 * Merge the watermarks from all active pipes for a specific level.
3154 */
3155 static void ilk_merge_wm_level(struct drm_device *dev,
3157 struct intel_wm_level *ret_wm)
3159 const struct intel_crtc *intel_crtc;
3161 ret_wm->enable = true;
3163 for_each_intel_crtc(dev, intel_crtc) {
3164 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
3165 const struct intel_wm_level *wm = &active->wm[level];
3167 if (!active->pipe_enabled)
3168 continue;
3171 * The watermark values may have been used in the past,
3172 * so we must maintain them in the registers for some
3173 * time even if the level is now disabled.
3174 */
3175 if (!wm->enable)
3176 ret_wm->enable = false;
3178 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3179 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3180 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3181 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3186 * Merge all low power watermarks for all active pipes.
3188 static void ilk_wm_merge(struct drm_device *dev,
3189 const struct intel_wm_config *config,
3190 const struct ilk_wm_maximums *max,
3191 struct intel_pipe_wm *merged)
3193 struct drm_i915_private *dev_priv = to_i915(dev);
3194 int level, max_level = ilk_wm_max_level(dev_priv);
3195 int last_enabled_level = max_level;
3197 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3198 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
3199 config->num_pipes_active > 1)
3200 last_enabled_level = 0;
3202 /* ILK: FBC WM must be disabled always */
3203 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
3205 /* merge each WM1+ level */
3206 for (level = 1; level <= max_level; level++) {
3207 struct intel_wm_level *wm = &merged->wm[level];
3209 ilk_merge_wm_level(dev, level, wm);
3211 if (level > last_enabled_level)
3212 wm->enable = false;
3213 else if (!ilk_validate_wm_level(level, max, wm))
3214 /* make sure all following levels get disabled */
3215 last_enabled_level = level - 1;
3218 * The spec says it is preferred to disable
3219 * FBC WMs instead of disabling a WM level.
3221 if (wm->fbc_val > max->fbc) {
3223 merged->fbc_wm_enabled = false;
3228 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3230 * FIXME this is racy. FBC might get enabled later.
3231 * What we should check here is whether FBC can be
3232 * enabled sometime later.
3234 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
3235 intel_fbc_is_active(dev_priv)) {
3236 for (level = 2; level <= max_level; level++) {
3237 struct intel_wm_level *wm = &merged->wm[level];
3244 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3246 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3247 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3248 }
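/*
 * Spelled out: the boolean promotes to 0 or 1, so with wm[4] enabled the
 * mapping is LP1->1, LP2->3, LP3->4 (the level 2 slot is skipped in that
 * case), and with wm[4] disabled it is the identity LP1->1, LP2->2, LP3->3.
 */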
3250 /* The value we need to program into the WM_LPx latency field */
3251 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
3253 struct drm_i915_private *dev_priv = to_i915(dev);
3255 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3258 return dev_priv->wm.pri_latency[level];
3261 static void ilk_compute_wm_results(struct drm_device *dev,
3262 const struct intel_pipe_wm *merged,
3263 enum intel_ddb_partitioning partitioning,
3264 struct ilk_wm_values *results)
3266 struct drm_i915_private *dev_priv = to_i915(dev);
3267 struct intel_crtc *intel_crtc;
3270 results->enable_fbc_wm = merged->fbc_wm_enabled;
3271 results->partitioning = partitioning;
3273 /* LP1+ register values */
3274 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3275 const struct intel_wm_level *r;
3277 level = ilk_wm_lp_to_level(wm_lp, merged);
3279 r = &merged->wm[level];
3282 * Maintain the watermark values even if the level is
3283 * disabled. Doing otherwise could cause underruns.
3285 results->wm_lp[wm_lp - 1] =
3286 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
3287 (r->pri_val << WM1_LP_SR_SHIFT) |
3291 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
3293 if (INTEL_GEN(dev_priv) >= 8)
3294 results->wm_lp[wm_lp - 1] |=
3295 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
3297 results->wm_lp[wm_lp - 1] |=
3298 r->fbc_val << WM1_LP_FBC_SHIFT;
3301 * Always set WM1S_LP_EN when spr_val != 0, even if the
3302 * level is disabled. Doing otherwise could cause underruns.
3304 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
3305 WARN_ON(wm_lp != 1);
3306 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
3308 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
3311 /* LP0 register values */
3312 for_each_intel_crtc(dev, intel_crtc) {
3313 enum pipe pipe = intel_crtc->pipe;
3314 const struct intel_wm_level *r =
3315 &intel_crtc->wm.active.ilk.wm[0];
3317 if (WARN_ON(!r->enable))
3320 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
3322 results->wm_pipe[pipe] =
3323 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
3324 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
3329 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
3330 * case both are at the same level. Prefer r1 in case they're the same. */
3331 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
3332 struct intel_pipe_wm *r1,
3333 struct intel_pipe_wm *r2)
3335 int level, max_level = ilk_wm_max_level(to_i915(dev));
3336 int level1 = 0, level2 = 0;
3338 for (level = 1; level <= max_level; level++) {
3339 if (r1->wm[level].enable)
3340 level1 = level;
3341 if (r2->wm[level].enable)
3342 level2 = level;
3343 }
3345 if (level1 == level2) {
3346 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3347 return r2;
3348 else
3349 return r1;
3350 } else if (level1 > level2) {
3351 return r1;
3352 } else {
3353 return r2;
3354 }
3355 }
3357 /* dirty bits used to track which watermarks need changes */
3358 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3359 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
3360 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3361 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3362 #define WM_DIRTY_FBC (1 << 24)
3363 #define WM_DIRTY_DDB (1 << 25)
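/*
 * For illustration: with the layout above, bits 0-7 track per-pipe WM0
 * dirtiness, bits 8-15 per-pipe linetime, bits 16-18 LP1-LP3, bit 24 FBC
 * and bit 25 DDB partitioning; e.g. WM_DIRTY_PIPE(PIPE_B) is bit 1 and
 * WM_DIRTY_LP(2) is bit 17.
 */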
3365 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3366 const struct ilk_wm_values *old,
3367 const struct ilk_wm_values *new)
3369 unsigned int dirty = 0;
3373 for_each_pipe(dev_priv, pipe) {
3374 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
3375 dirty |= WM_DIRTY_LINETIME(pipe);
3376 /* Must disable LP1+ watermarks too */
3377 dirty |= WM_DIRTY_LP_ALL;
3380 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3381 dirty |= WM_DIRTY_PIPE(pipe);
3382 /* Must disable LP1+ watermarks too */
3383 dirty |= WM_DIRTY_LP_ALL;
3387 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3388 dirty |= WM_DIRTY_FBC;
3389 /* Must disable LP1+ watermarks too */
3390 dirty |= WM_DIRTY_LP_ALL;
3393 if (old->partitioning != new->partitioning) {
3394 dirty |= WM_DIRTY_DDB;
3395 /* Must disable LP1+ watermarks too */
3396 dirty |= WM_DIRTY_LP_ALL;
3399 /* LP1+ watermarks already deemed dirty, no need to continue */
3400 if (dirty & WM_DIRTY_LP_ALL)
3401 return dirty;
3403 /* Find the lowest numbered LP1+ watermark in need of an update... */
3404 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3405 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3406 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3407 break;
3408 }
3410 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3411 for (; wm_lp <= 3; wm_lp++)
3412 dirty |= WM_DIRTY_LP(wm_lp);
3414 return dirty;
3415 }
3417 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3420 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3421 bool changed = false;
3423 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
3424 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
3425 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
3428 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
3429 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
3430 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
3433 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
3434 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
3435 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3440 * Don't touch WM1S_LP_EN here.
3441 * Doing so could cause underruns.
3448 * The spec says we shouldn't write when we don't need, because every write
3449 * causes WMs to be re-evaluated, expending some power.
3451 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3452 struct ilk_wm_values *results)
3454 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3458 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3462 _ilk_disable_lp_wm(dev_priv, dirty);
3464 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3465 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3466 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3467 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3468 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3469 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3471 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3472 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3473 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3474 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3475 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3476 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3478 if (dirty & WM_DIRTY_DDB) {
3479 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3480 val = I915_READ(WM_MISC);
3481 if (results->partitioning == INTEL_DDB_PART_1_2)
3482 val &= ~WM_MISC_DATA_PARTITION_5_6;
3484 val |= WM_MISC_DATA_PARTITION_5_6;
3485 I915_WRITE(WM_MISC, val);
3487 val = I915_READ(DISP_ARB_CTL2);
3488 if (results->partitioning == INTEL_DDB_PART_1_2)
3489 val &= ~DISP_DATA_PARTITION_5_6;
3491 val |= DISP_DATA_PARTITION_5_6;
3492 I915_WRITE(DISP_ARB_CTL2, val);
3496 if (dirty & WM_DIRTY_FBC) {
3497 val = I915_READ(DISP_ARB_CTL);
3498 if (results->enable_fbc_wm)
3499 val &= ~DISP_FBC_WM_DIS;
3501 val |= DISP_FBC_WM_DIS;
3502 I915_WRITE(DISP_ARB_CTL, val);
3505 if (dirty & WM_DIRTY_LP(1) &&
3506 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3507 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3509 if (INTEL_GEN(dev_priv) >= 7) {
3510 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3511 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3512 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3513 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3516 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3517 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3518 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3519 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3520 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3521 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3523 dev_priv->wm.hw = *results;
3526 bool ilk_disable_lp_wm(struct drm_device *dev)
3528 struct drm_i915_private *dev_priv = to_i915(dev);
3530 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3533 #define SKL_SAGV_BLOCK_TIME 30 /* µs */
3536 * FIXME: We still don't have the proper code detect if we need to apply the WA,
3537 * so assume we'll always need it in order to avoid underruns.
3539 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3541 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3543 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
3544 return true;
3546 return false;
3547 }
3549 static bool
3550 intel_has_sagv(struct drm_i915_private *dev_priv)
3552 if (IS_KABYLAKE(dev_priv))
3553 return true;
3555 if (IS_SKYLAKE(dev_priv) &&
3556 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
3557 return true;
3559 return false;
3560 }
3562 /*
3563 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3564 * depending on power and performance requirements. The display engine access
3565 * to system memory is blocked during the adjustment time. Because of the
3566 * blocking time, having this enabled can cause full system hangs and/or pipe
3567 * underruns if we don't meet all of the following requirements:
3569 * - <= 1 pipe enabled
3570 * - All planes can enable watermarks for latencies >= SAGV engine block time
3571 * - We're not using an interlaced display configuration
3574 intel_enable_sagv(struct drm_i915_private *dev_priv)
3578 if (!intel_has_sagv(dev_priv))
3579 return 0;
3581 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3582 return 0;
3584 DRM_DEBUG_KMS("Enabling the SAGV\n");
3585 mutex_lock(&dev_priv->rps.hw_lock);
3587 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3588 GEN9_SAGV_ENABLE);
3590 /* We don't need to wait for the SAGV when enabling */
3591 mutex_unlock(&dev_priv->rps.hw_lock);
3594 * Some skl systems, pre-release machines in particular,
3595 * don't actually have an SAGV.
3597 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3598 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3599 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3601 } else if (ret < 0) {
3602 DRM_ERROR("Failed to enable the SAGV\n");
3603 return ret;
3604 }
3606 dev_priv->sagv_status = I915_SAGV_ENABLED;
3607 return 0;
3608 }
3610 static int
3611 intel_disable_sagv(struct drm_i915_private *dev_priv)
3615 if (!intel_has_sagv(dev_priv))
3616 return 0;
3618 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3619 return 0;
3621 DRM_DEBUG_KMS("Disabling the SAGV\n");
3622 mutex_lock(&dev_priv->rps.hw_lock);
3624 /* bspec says to keep retrying for at least 1 ms */
3625 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3627 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3629 mutex_unlock(&dev_priv->rps.hw_lock);
3632 * Some skl systems, pre-release machines in particular,
3633 * don't actually have an SAGV.
3635 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3636 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3637 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3639 } else if (ret < 0) {
3640 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
3641 return ret;
3642 }
3644 dev_priv->sagv_status = I915_SAGV_DISABLED;
3645 return 0;
3646 }
3648 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3650 struct drm_device *dev = state->dev;
3651 struct drm_i915_private *dev_priv = to_i915(dev);
3652 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3653 struct intel_crtc *crtc;
3654 struct intel_plane *plane;
3655 struct intel_crtc_state *cstate;
3659 if (!intel_has_sagv(dev_priv))
3660 return false;
3662 /*
3663 * SKL workaround: bspec recommends we disable the SAGV when we have
3664 * more than one pipe enabled
3665 *
3666 * If there are no active CRTCs, no additional checks need be performed
3667 */
3668 if (hweight32(intel_state->active_crtcs) == 0)
3669 return true;
3670 else if (hweight32(intel_state->active_crtcs) > 1)
3671 return false;
3673 /* Since we're now guaranteed to only have one active CRTC... */
3674 pipe = ffs(intel_state->active_crtcs) - 1;
3675 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3676 cstate = to_intel_crtc_state(crtc->base.state);
3678 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3679 return false;
3681 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3682 struct skl_plane_wm *wm =
3683 &cstate->wm.skl.optimal.planes[plane->id];
3685 /* Skip this plane if it's not enabled */
3686 if (!wm->wm[0].plane_en)
3689 /* Find the highest enabled wm level for this plane */
3690 for (level = ilk_wm_max_level(dev_priv);
3691 !wm->wm[level].plane_en; --level)
3694 latency = dev_priv->wm.skl_latency[level];
3696 if (skl_needs_memory_bw_wa(intel_state) &&
3697 plane->base.state->fb->modifier ==
3698 I915_FORMAT_MOD_X_TILED)
3699 latency += 15;
3701 /*
3702 * If any of the planes on this pipe don't enable wm levels
3703 * that incur memory latencies higher than 30µs we can't enable
3704 * the SAGV.
3705 */
3706 if (latency < SKL_SAGV_BLOCK_TIME)
3707 return false;
3708 }
3710 return true;
3711 }
3713 static void
3714 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3715 const struct intel_crtc_state *cstate,
3716 struct skl_ddb_entry *alloc, /* out */
3717 int *num_active /* out */)
3719 struct drm_atomic_state *state = cstate->base.state;
3720 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3721 struct drm_i915_private *dev_priv = to_i915(dev);
3722 struct drm_crtc *for_crtc = cstate->base.crtc;
3723 unsigned int pipe_size, ddb_size;
3724 int nth_active_pipe;
3726 if (WARN_ON(!state) || !cstate->base.active) {
3727 alloc->start = 0;
3728 alloc->end = 0;
3729 *num_active = hweight32(dev_priv->active_crtcs);
3730 return;
3731 }
3733 if (intel_state->active_pipe_changes)
3734 *num_active = hweight32(intel_state->active_crtcs);
3735 else
3736 *num_active = hweight32(dev_priv->active_crtcs);
3738 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3739 WARN_ON(ddb_size == 0);
3741 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3744 * If the state doesn't change the active CRTCs, then there's
3745 * no need to recalculate; the existing pipe allocation limits
3746 * should remain unchanged. Note that we're safe from racing
3747 * commits since any racing commit that changes the active CRTC
3748 * list would need to grab _all_ crtc locks, including the one
3749 * we currently hold.
3751 if (!intel_state->active_pipe_changes) {
3753 * alloc may be cleared by clear_intel_crtc_state,
3754 * copy from old state to be sure
3756 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3757 return;
3758 }
3760 nth_active_pipe = hweight32(intel_state->active_crtcs &
3761 (drm_crtc_mask(for_crtc) - 1));
3762 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3763 alloc->start = nth_active_pipe * ddb_size / *num_active;
3764 alloc->end = alloc->start + pipe_size;
3765 }
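/*
 * Worked example (assuming a gen9 DDB of 896 blocks): after reserving the
 * 4 bypass blocks, ddb_size = 892. With two active pipes each gets
 * 892 / 2 = 446 blocks; the second active pipe (nth_active_pipe = 1) is
 * handed [446, 892) while the first gets [0, 446).
 */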
3767 static unsigned int skl_cursor_allocation(int num_active)
3768 {
3769 if (num_active == 1)
3770 return 32;
3772 return 8;
3773 }
3775 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3777 entry->start = reg & 0x3ff;
3778 entry->end = (reg >> 16) & 0x3ff;
3780 if (entry->end)
3781 entry->end += 1;
3782 }
3783 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3784 struct skl_ddb_allocation *ddb /* out */)
3786 struct intel_crtc *crtc;
3788 memset(ddb, 0, sizeof(*ddb));
3790 for_each_intel_crtc(&dev_priv->drm, crtc) {
3791 enum intel_display_power_domain power_domain;
3792 enum plane_id plane_id;
3793 enum pipe pipe = crtc->pipe;
3795 power_domain = POWER_DOMAIN_PIPE(pipe);
3796 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3797 continue;
3799 for_each_plane_id_on_crtc(crtc, plane_id) {
3802 if (plane_id != PLANE_CURSOR)
3803 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3804 else
3805 val = I915_READ(CUR_BUF_CFG(pipe));
3807 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
3808 }
3810 intel_display_power_put(dev_priv, power_domain);
3811 }
3812 }
3814 /*
3815 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3816 * The bspec defines downscale amount as:
3819 * Horizontal down scale amount = maximum[1, Horizontal source size /
3820 * Horizontal destination size]
3821 * Vertical down scale amount = maximum[1, Vertical source size /
3822 * Vertical destination size]
3823 * Total down scale amount = Horizontal down scale amount *
3824 * Vertical down scale amount
3827 * Return value is provided in 16.16 fixed point form to retain fractional part.
3828 * Caller should take care of dividing & rounding off the value.
3831 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3832 const struct intel_plane_state *pstate)
3834 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
3835 uint32_t downscale_h, downscale_w;
3836 uint32_t src_w, src_h, dst_w, dst_h;
3838 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3839 return DRM_PLANE_HELPER_NO_SCALING;
3841 /* n.b., src is 16.16 fixed point, dst is whole integer */
3842 if (plane->id == PLANE_CURSOR) {
3843 src_w = pstate->base.src_w;
3844 src_h = pstate->base.src_h;
3845 dst_w = pstate->base.crtc_w;
3846 dst_h = pstate->base.crtc_h;
3848 src_w = drm_rect_width(&pstate->base.src);
3849 src_h = drm_rect_height(&pstate->base.src);
3850 dst_w = drm_rect_width(&pstate->base.dst);
3851 dst_h = drm_rect_height(&pstate->base.dst);
3854 if (drm_rotation_90_or_270(pstate->base.rotation))
3857 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3858 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3860 /* Provide result in 16.16 fixed point */
3861 return (uint64_t)downscale_w * downscale_h >> 16;
3862 }
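/*
 * Worked example: scaling a 3840 pixel wide source onto a 1920 pixel wide
 * destination with no vertical scaling gives downscale_w =
 * (3840 << 16) / 1920 = 0x20000 (2.0) and downscale_h = 0x10000 (1.0),
 * so the product above is (0x20000 * 0x10000) >> 16 = 0x20000, i.e. 2.0
 * in 16.16 fixed point.
 */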
3865 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3866 const struct drm_plane_state *pstate,
3869 struct intel_plane *plane = to_intel_plane(pstate->plane);
3870 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3871 uint32_t down_scale_amount, data_rate;
3872 uint32_t width = 0, height = 0;
3873 struct drm_framebuffer *fb;
3876 if (!intel_pstate->base.visible)
3880 format = fb->format->format;
3882 if (plane->id == PLANE_CURSOR)
3884 if (y && format != DRM_FORMAT_NV12)
3887 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3888 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3890 if (drm_rotation_90_or_270(pstate->rotation))
3891 swap(width, height);
3893 /* for planar format */
3894 if (format == DRM_FORMAT_NV12) {
3895 if (y) /* y-plane data rate */
3896 data_rate = width * height *
3898 else /* uv-plane data rate */
3899 data_rate = (width / 2) * (height / 2) *
3902 /* for packed formats */
3903 data_rate = width * height * fb->format->cpp[0];
3906 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
3908 return (uint64_t)data_rate * down_scale_amount >> 16;
3912 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3913 * an 8192x4096@32bpp framebuffer:
3914 * 3 * 4096 * 8192 * 4 < 2^32
3917 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
3918 unsigned *plane_data_rate,
3919 unsigned *plane_y_data_rate)
3921 struct drm_crtc_state *cstate = &intel_cstate->base;
3922 struct drm_atomic_state *state = cstate->state;
3923 struct drm_plane *plane;
3924 const struct drm_plane_state *pstate;
3925 unsigned int total_data_rate = 0;
3927 if (WARN_ON(!state))
3930 /* Calculate and cache data rate for each plane */
3931 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
3932 enum plane_id plane_id = to_intel_plane(plane)->id;
3936 rate = skl_plane_relative_data_rate(intel_cstate,
3938 plane_data_rate[plane_id] = rate;
3940 total_data_rate += rate;
3943 rate = skl_plane_relative_data_rate(intel_cstate,
3945 plane_y_data_rate[plane_id] = rate;
3947 total_data_rate += rate;
3950 return total_data_rate;
3954 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3957 struct drm_framebuffer *fb = pstate->fb;
3958 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3959 uint32_t src_w, src_h;
3960 uint32_t min_scanlines = 8;
3966 /* For packed formats, no y-plane, return 0 */
3967 if (y && fb->format->format != DRM_FORMAT_NV12)
3970 /* For Non Y-tile return 8-blocks */
3971 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
3972 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
3975 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3976 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3978 if (drm_rotation_90_or_270(pstate->rotation))
3981 /* Halve UV plane width and height for NV12 */
3982 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
3987 if (fb->format->format == DRM_FORMAT_NV12 && !y)
3988 plane_bpp = fb->format->cpp[1];
3990 plane_bpp = fb->format->cpp[0];
3992 if (drm_rotation_90_or_270(pstate->rotation)) {
3993 switch (plane_bpp) {
4007 WARN(1, "Unsupported pixel depth %u for rotation",
4013 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
4014 }
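/*
 * Worked example for the Y-tiled path (illustrative numbers): a 1920
 * pixel wide, 4 bpp plane with the default min_scanlines = 8 needs
 * DIV_ROUND_UP(4 * 1920 * 4, 512) = 60 blocks per 4 scanlines, so
 * 60 * 8 / 4 + 3 = 123 DDB blocks minimum.
 */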
4017 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
4018 uint16_t *minimum, uint16_t *y_minimum)
4020 const struct drm_plane_state *pstate;
4021 struct drm_plane *plane;
4023 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
4024 enum plane_id plane_id = to_intel_plane(plane)->id;
4026 if (plane_id == PLANE_CURSOR)
4029 if (!pstate->visible)
4032 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
4033 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
4036 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
4040 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
4041 struct skl_ddb_allocation *ddb /* out */)
4043 struct drm_atomic_state *state = cstate->base.state;
4044 struct drm_crtc *crtc = cstate->base.crtc;
4045 struct drm_device *dev = crtc->dev;
4046 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4047 enum pipe pipe = intel_crtc->pipe;
4048 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
4049 uint16_t alloc_size, start;
4050 uint16_t minimum[I915_MAX_PLANES] = {};
4051 uint16_t y_minimum[I915_MAX_PLANES] = {};
4052 unsigned int total_data_rate;
4053 enum plane_id plane_id;
4055 unsigned plane_data_rate[I915_MAX_PLANES] = {};
4056 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
4058 /* Clear the partitioning for disabled planes. */
4059 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
4060 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
4062 if (WARN_ON(!state))
4065 if (!cstate->base.active) {
4066 alloc->start = alloc->end = 0;
4070 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
4071 alloc_size = skl_ddb_entry_size(alloc);
4072 if (alloc_size == 0) {
4073 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
4077 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
4079 /*
4080 * 1. Allocate the minimum required blocks for each active plane
4081 * and allocate the cursor; it doesn't require extra allocation
4082 * proportional to the data rate.
4083 */
4085 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4086 alloc_size -= minimum[plane_id];
4087 alloc_size -= y_minimum[plane_id];
4090 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
4091 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
4094 * 2. Distribute the remaining space in proportion to the amount of
4095 * data each plane needs to fetch from memory.
4097 * FIXME: we may not allocate every single block here.
4099 total_data_rate = skl_get_total_relative_data_rate(cstate,
4102 if (total_data_rate == 0)
4105 start = alloc->start;
4106 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4107 unsigned int data_rate, y_data_rate;
4108 uint16_t plane_blocks, y_plane_blocks = 0;
4110 if (plane_id == PLANE_CURSOR)
4113 data_rate = plane_data_rate[plane_id];
4116 * allocation for (packed formats) or (uv-plane part of planar format):
4117 * promote the expression to 64 bits to avoid overflowing, the
4118 * result is < available as data_rate / total_data_rate < 1
4120 plane_blocks = minimum[plane_id];
4121 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
4124 /* Leave disabled planes at (0,0) */
4126 ddb->plane[pipe][plane_id].start = start;
4127 ddb->plane[pipe][plane_id].end = start + plane_blocks;
4130 start += plane_blocks;
4133 * allocation for y_plane part of planar format:
4135 y_data_rate = plane_y_data_rate[plane_id];
4137 y_plane_blocks = y_minimum[plane_id];
4138 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
4142 ddb->y_plane[pipe][plane_id].start = start;
4143 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
4146 start += y_plane_blocks;
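/*
 * Illustrative split for the proportional pass above (hypothetical rates,
 * not measured): with 400 blocks left after the fixed minimums, a plane
 * fetching 300 MB/s and one fetching 100 MB/s receive minimum + 300 and
 * minimum + 100 blocks respectively; div_u64() keeps the intermediate
 * product alloc_size * data_rate from overflowing 32 bits.
 */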
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
					 uint32_t latency)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
	return ret;
}
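/*
 * Example with assumed inputs: latency = 5 us, pixel_rate = 148500 kHz,
 * cpp = 4. Method 1 then yields 5 * 148500 * 4 / (1000 * 512) ~= 5.8
 * blocks (as a 16.16 fixed point value), i.e. the bytes drained during
 * the latency window divided by the 512 byte DDB block size.
 */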
static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
					 uint32_t pipe_htotal,
					 uint32_t latency,
					 uint_fixed_16_16_t plane_blocks_per_line)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}
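/*
 * Example with the same assumed inputs and crtc_htotal = 2200: the line
 * count is DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1, so method 2 equals
 * one plane_blocks_per_line, i.e. it covers the latency with whole lines
 * worth of blocks rather than a raw byte count.
 */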
static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = cstate->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(cstate, pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}
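/*
 * downscale_amount is 16.16 fixed point, so e.g. a 1.5x downscale comes
 * back as 0x18000 and 100000 kHz * 0x18000 >> 16 = 150000 kHz; the
 * WARN_ON(clamp_t(...)) above only guards against the 64 bit product
 * overflowing the implicit u32 result. (Illustrative numbers.)
 */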
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t selected_result;
	uint32_t interm_pbpl;
	uint32_t plane_bytes_per_line;
	uint32_t res_blocks, res_lines;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;
	uint_fixed_16_16_t y_tile_minimum;
	uint32_t y_min_scanlines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
	bool y_tiled, x_tiled;

	if (latency == 0 ||
	    !intel_wm_plane_visible(cstate, intel_pstate)) {
		*enabled = false;
		return 0;
	}

	y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		  fb->modifier == I915_FORMAT_MOD_Yf_TILED;
	x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;

	/* Display WA #1141: kbl. */
	if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
		latency += 4;

	if (apply_memory_bw_wa && x_tiled)
		latency += 15;

	if (plane->id == PLANE_CURSOR) {
		width = intel_pstate->base.crtc_w;
		height = intel_pstate->base.crtc_h;
	} else {
		width = drm_rect_width(&intel_pstate->base.src) >> 16;
		height = drm_rect_height(&intel_pstate->base.src) >> 16;
	}

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = fb->format->cpp[0];
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
			fb->format->cpp[1] :
			fb->format->cpp[0];

		switch (cpp) {
		case 1:
			y_min_scanlines = 16;
			break;
		case 2:
			y_min_scanlines = 8;
			break;
		case 4:
			y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(cpp);
			return -EINVAL;
		}
	} else {
		y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		y_min_scanlines *= 2;

	plane_bytes_per_line = width * cpp;
	if (y_tiled) {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
					   y_min_scanlines, 512);
		plane_blocks_per_line =
			fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
	} else if (x_tiled) {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
		plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
	}

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 plane_blocks_per_line);

	y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
					     plane_blocks_per_line);

	if (y_tiled) {
		selected_result = max_fixed_16_16(method2, y_tile_minimum);
	} else {
		if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
		    (plane_bytes_per_line / 512 < 1))
			selected_result = method2;
		else if ((ddb_allocation /
			fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
			selected_result = min_fixed_16_16(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
	res_lines = DIV_ROUND_UP(selected_result.val,
				 plane_blocks_per_line.val);

	if (level >= 1 && level <= 7) {
		if (y_tiled) {
			res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
			res_lines += y_min_scanlines;
		} else {
			res_blocks++;
		}
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			struct drm_plane *plane = pstate->plane;

			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
				      plane->base.id, plane->name,
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}
static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     struct intel_plane *intel_plane,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = &intel_plane->base;
	struct intel_plane_state *intel_pstate = NULL;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;

	if (state)
		intel_pstate =
			intel_atomic_get_existing_plane_state(state,
							      intel_plane);

	/*
	 * Note: If we start supporting multiple pending atomic commits against
	 * the same planes/CRTC's in the future, plane->state will no longer be
	 * the correct pre-state to use for the calculations here and we'll
	 * need to change where we get the 'unchanged' plane data from.
	 *
	 * For now this is fine because we only allow one queued commit against
	 * a CRTC.  Even if the plane isn't modified by this transaction and we
	 * don't have a plane lock, we still have the CRTC's lock, so we know
	 * that no other transactions are racing with us to update it.
	 */
	if (!intel_pstate)
		intel_pstate = to_intel_plane_state(plane->state);

	WARN_ON(!intel_pstate->base.fb);

	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);

	ret = skl_compute_plane_wm(dev_priv,
				   cstate,
				   intel_pstate,
				   ddb_blocks,
				   level,
				   &result->plane_res_b,
				   &result->plane_res_l,
				   &result->plane_en);
	if (ret)
		return ret;

	return 0;
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint32_t pixel_rate;
	uint32_t linetime_wm;

	if (!cstate->base.active)
		return 0;

	pixel_rate = cstate->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return 0;

	linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
				   1000, pixel_rate);

	/* Display WA #1135: bxt. */
	if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm = DIV_ROUND_UP(linetime_wm, 2);

	return linetime_wm;
}
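/*
 * Example for the linetime calculation above (assumed mode, not a real
 * platform table): with crtc_htotal = 2200 and pixel_rate = 148500 kHz,
 * linetime_wm = DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119 in units of
 * 1/8 us, i.e. roughly a 14.8 us line time.
 */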
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	int level, max_level = ilk_wm_max_level(dev_priv);
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		wm = &pipe_wm->planes[intel_plane->id];

		for (level = 0; level <= max_level; level++) {
			ret = skl_compute_wm_level(dev_priv, ddb, cstate,
						   intel_plane, level,
						   &wm->wm[level]);
			if (ret)
				return ret;
		}
		skl_compute_transition_wm(cstate, &wm->trans_wm);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       enum plane_id plane_id)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
			    &ddb->plane[pipe][plane_id]);
	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
			    &ddb->y_plane[pipe][plane_id]);
}

static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}

static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
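/*
 * DDB entries are half-open [start, end) ranges, so e.g. {0, 100} and
 * {100, 200} do not overlap while {0, 101} and {100, 200} do; adjacent
 * allocations sharing a boundary are therefore legal.
 */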
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}
static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_new_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}
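/*
 * The result is a plain bitmask of CRTC indices, e.g. a commit touching
 * pipes A and C yields 0b101; callers only test and merge these bits, so
 * the uint32_t width comfortably covers I915_MAX_PIPES.
 */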
static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
					&new_ddb->plane[pipe][plane_id]) &&
		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
					&new_ddb->y_plane[pipe][plane_id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}
static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}
static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int i;

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			enum plane_id plane_id = intel_plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_ddb->plane[pipe][plane_id];
			new = &new_ddb->plane[pipe][plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME: Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	enum plane_id plane_id;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id != PLANE_CURSOR)
			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
					   ddb, plane_id);
		else
			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
					    ddb);
	}
}
static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &state->wm_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
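/*
 * For example, _FW_WM(tmp, SR) expands to
 * ((tmp) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT, extracting the self-refresh
 * watermark field from the register value; the _VLV variant only swaps in
 * the wider DSPFW_*_MASK_VLV field definitions.
 */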
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
void g4x_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		      wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0],
			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time) + 1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
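/*
 * Worked example of the normal calculation documented above (illustrative
 * numbers only): a 100 MHz dotclock at 4 bytes per pixel with 2 us of
 * latency drains 100e6 * 4 * 2e-6 = 800 bytes while memory is busy, so the
 * FIFO must start refilling before fewer than 800 bytes (plus the 2 entry
 * clock-crossing margin, rounded up) remain.
 */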
/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
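/*
 * E.g. on gen9 with softlimits max = 0x16 and min = 0x0a (made-up codes),
 * requesting val == min yields (0x16 << 23) | (0x0a << 14); at any higher
 * val the down-limit field stays clear so we keep receiving down
 * interrupts instead of the hardware clamping silently.
 */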
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 &&
		    val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq &&
		    val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq &&
			 val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
		    val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
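/*
 * E.g. at a frequency strictly between the softlimits both the up and down
 * event bits are kept, the result is clipped to the events the platform
 * actually supports (pm_rps_events) and then inverted, since GEN6_PMINTRMSK
 * is a mask of interrupts to *disable*.
 */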
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}
static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (err)
			return err;

		gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * If Gfx is idle, then:
 * 1. Forcewake the media well.
 * 2. Request the idle frequency.
 * 3. Release the forcewake of the media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;
	int err;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	err = valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);

	if (err)
		DRM_ERROR("Failed to set RPS for idle\n");
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		u8 freq;

		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Use the user's desired frequency as a guide, but for better
		 * performance, jump directly to RPe as our starting frequency.
		 */
		freq = max(dev_priv->rps.cur_freq,
			   dev_priv->rps.efficient_freq);

		if (intel_set_rps(dev_priv,
				  clamp(freq,
					dev_priv->rps.min_freq_softlimit,
					dev_priv->rps.max_freq_softlimit)))
			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->gt.awake &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	lockdep_assert_held(&dev_priv->rps.hw_lock);
	GEM_BUG_ON(val > dev_priv->rps.max_freq);
	GEM_BUG_ON(val < dev_priv->rps.min_freq);

	if (!dev_priv->rps.enabled) {
		dev_priv->rps.cur_freq = val;
		return 0;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = valleyview_set_rps(dev_priv, val);
	else
		err = gen6_set_rps(dev_priv, val);

	return err;
}
static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	/* We do a forcewake before disabling RC6;
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
6097 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
6099 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6100 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
6101 mode = GEN6_RC_CTL_RC6_ENABLE;
6102 else
6103 mode = 0;
6104 }
6105 if (HAS_RC6p(dev_priv))
6106 DRM_DEBUG_DRIVER("Enabling RC6 states: "
6107 "RC6 %s RC6p %s RC6pp %s\n",
6108 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
6109 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
6110 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
6112 else
6113 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
6114 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
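/*
 * On Broxton the BIOS is responsible for the RC6 context setup, so
 * before trusting RC6 we sanity-check everything it should have
 * programmed: context location and base, engine idle wait times,
 * pushbus, GFX pause and GPM control. Any miss disables RC6 rather
 * than risking a hang.
 */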
6117 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
6119 struct i915_ggtt *ggtt = &dev_priv->ggtt;
6120 bool enable_rc6 = true;
6121 unsigned long rc6_ctx_base;
6122 u32 rc_ctl;
6123 int rc_sw_target;
6125 rc_ctl = I915_READ(GEN6_RC_CONTROL);
6126 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
6127 RC_SW_TARGET_STATE_SHIFT;
6128 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
6129 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
6130 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
6131 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
6132 rc_sw_target);
6134 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
6135 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
6136 enable_rc6 = false;
6137 }
6139 /*
6140 * The exact context size is not known for BXT, so assume a page size
6141 * for it.
6142 */
6143 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
6144 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
6145 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
6146 ggtt->stolen_reserved_size))) {
6147 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
6151 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
6152 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
6153 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
6154 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
6155 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
6159 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
6160 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
6161 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
6162 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
6166 if (!I915_READ(GEN6_GFXPAUSE)) {
6167 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
6171 if (!I915_READ(GEN8_MISC_CTRL0)) {
6172 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
6179 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
6181 /* No RC6 before Ironlake and code is gone for ilk. */
6182 if (INTEL_INFO(dev_priv)->gen < 6)
6183 return 0;
6185 if (!enable_rc6)
6186 return 0;
6188 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
6189 DRM_INFO("RC6 disabled by BIOS\n");
6193 /* Respect the kernel parameter if it is set */
6194 if (enable_rc6 >= 0) {
6195 int mask;
6197 if (HAS_RC6p(dev_priv))
6198 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
6199 INTEL_RC6pp_ENABLE;
6200 else
6201 mask = INTEL_RC6_ENABLE;
6203 if ((enable_rc6 & mask) != enable_rc6)
6204 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
6205 "(requested %d, valid %d)\n",
6206 enable_rc6 & mask, enable_rc6, mask);
6208 return enable_rc6 & mask;
6209 }
6211 if (IS_IVYBRIDGE(dev_priv))
6212 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
6214 return INTEL_RC6_ENABLE;
6217 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
6219 /* All of these values are in units of 50MHz */
6221 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
6222 if (IS_GEN9_LP(dev_priv)) {
6223 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
6224 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
6225 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6226 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
6227 } else {
6228 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6229 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
6230 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
6231 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
6232 }
6233 /* hw_max = RP0 until we check for overclocking */
6234 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
6236 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
6237 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
6238 IS_GEN9_BC(dev_priv)) {
6239 u32 ddcc_status = 0;
6241 if (sandybridge_pcode_read(dev_priv,
6242 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
6243 &ddcc_status) == 0)
6244 dev_priv->rps.efficient_freq =
6245 clamp_t(u8,
6246 ((ddcc_status >> 8) & 0xff),
6247 dev_priv->rps.min_freq,
6248 dev_priv->rps.max_freq);
6249 }
6251 if (IS_GEN9_BC(dev_priv)) {
6252 /* Store the frequency values in 16.66 MHz units, which is
6253 * the natural hardware unit for SKL
6254 */
6255 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
6256 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
6257 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
6258 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
6259 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
6260 }
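/*
 * Worked example of the unit handling above (assuming GEN9_FREQ_SCALER
 * is the usual factor of 3): a ratio of 22 read from RP_STATE_CAP means
 * 22 * 50 MHz = 1100 MHz on HSW/BDW, while on SKL the same cap is kept
 * as 22 * 3 = 66 units of 50/3 ~= 16.66 MHz, i.e. still 1100 MHz.
 */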
6263 static void reset_rps(struct drm_i915_private *dev_priv,
6264 int (*set)(struct drm_i915_private *, u8))
6266 u8 freq = dev_priv->rps.cur_freq;
6268 /* force a reset */
6269 dev_priv->rps.power = -1;
6270 dev_priv->rps.cur_freq = -1;
6272 if (set(dev_priv, freq))
6273 DRM_ERROR("Failed to reset RPS to initial values\n");
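/*
 * Note the -1 sentinels in reset_rps(): they guarantee the set() call
 * sees both a power-state and a frequency change, forcing a full
 * reprogram of the RP registers instead of an early "no change" exit.
 */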
6276 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
6277 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
6279 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6281 /* Program defaults and thresholds for RPS*/
6282 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6283 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
6285 /* 1 second timeout*/
6286 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
6287 GT_INTERVAL_FROM_US(dev_priv, 1000000));
6289 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
6291 /* Leaning on the below call to gen6_set_rps to program/setup the
6292 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
6293 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
6294 reset_rps(dev_priv, gen6_set_rps);
6296 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6299 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
6301 struct intel_engine_cs *engine;
6302 enum intel_engine_id id;
6303 uint32_t rc6_mask = 0;
6305 /* 1a: Software RC state - RC0 */
6306 I915_WRITE(GEN6_RC_STATE, 0);
6308 /* 1b: Get forcewake during program sequence. Although the driver
6309 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6310 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6312 /* 2a: Disable RC states. */
6313 I915_WRITE(GEN6_RC_CONTROL, 0);
6315 /* 2b: Program RC6 thresholds.*/
6317 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
6318 if (IS_SKYLAKE(dev_priv))
6319 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
6320 else
6321 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
6322 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6323 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6324 for_each_engine(engine, dev_priv, id)
6325 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6327 if (HAS_GUC(dev_priv))
6328 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
6330 I915_WRITE(GEN6_RC_SLEEP, 0);
6332 /* 2c: Program Coarse Power Gating Policies. */
6333 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
6334 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
6336 /* 3a: Enable RC6 */
6337 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6338 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6339 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
6340 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
6341 I915_WRITE(GEN6_RC_CONTROL,
6342 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
6344 /*
6345 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
6346 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
6347 */
6348 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
6349 I915_WRITE(GEN9_PG_ENABLE, 0);
6350 else
6351 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
6352 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
6354 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6357 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
6359 struct intel_engine_cs *engine;
6360 enum intel_engine_id id;
6361 uint32_t rc6_mask = 0;
6363 /* 1a: Software RC state - RC0 */
6364 I915_WRITE(GEN6_RC_STATE, 0);
6366 /* 1c & 1d: Get forcewake during program sequence. Although the driver
6367 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6368 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6370 /* 2a: Disable RC states. */
6371 I915_WRITE(GEN6_RC_CONTROL, 0);
6373 /* 2b: Program RC6 thresholds.*/
6374 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6375 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6376 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6377 for_each_engine(engine, dev_priv, id)
6378 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6379 I915_WRITE(GEN6_RC_SLEEP, 0);
6380 if (IS_BROADWELL(dev_priv))
6381 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
6382 else
6383 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
6385 /* 3: Enable RC6 */
6386 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6387 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
6388 intel_print_rc6_info(dev_priv, rc6_mask);
6389 if (IS_BROADWELL(dev_priv))
6390 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6391 GEN7_RC_CTL_TO_MODE |
6392 rc6_mask);
6393 else
6394 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
6395 GEN6_RC_CTL_EI_MODE(1) |
6396 rc6_mask);
6398 /* 4 Program defaults and thresholds for RPS*/
6399 I915_WRITE(GEN6_RPNSWREQ,
6400 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6401 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6402 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
6403 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
6404 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
6406 /* Docs recommend 900MHz, and 300 MHz respectively */
6407 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6408 dev_priv->rps.max_freq_softlimit << 24 |
6409 dev_priv->rps.min_freq_softlimit << 16);
6411 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
6412 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
6413 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
6414 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
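/*
 * The threshold and EI registers above count in units of 1.28 us, so
 * e.g. 7600000 / 128 = 59375 ticks, and 59375 * 1.28 us ~= 76 ms of
 * busyness per evaluation interval, matching the inline comments.
 */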
6416 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6418 /* 5: Enable RPS */
6419 I915_WRITE(GEN6_RP_CONTROL,
6420 GEN6_RP_MEDIA_TURBO |
6421 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6422 GEN6_RP_MEDIA_IS_GFX |
6423 GEN6_RP_ENABLE |
6424 GEN6_RP_UP_BUSY_AVG |
6425 GEN6_RP_DOWN_IDLE_AVG);
6427 /* 6: Ring frequency + overclocking (our driver does this later) */
6429 reset_rps(dev_priv, gen6_set_rps);
6431 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6434 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
6436 struct intel_engine_cs *engine;
6437 enum intel_engine_id id;
6438 u32 rc6vids, rc6_mask = 0;
6439 u32 gtfifodbg;
6440 int rc6_mode;
6441 int ret;
6443 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6445 /* Here begins a magic sequence of register writes to enable
6446 * auto-downclocking.
6447 *
6448 * Perhaps there might be some value in exposing these to
6449 * userspace...
6450 */
6451 I915_WRITE(GEN6_RC_STATE, 0);
6453 /* Clear the DBG now so we don't confuse earlier errors */
6454 gtfifodbg = I915_READ(GTFIFODBG);
6455 if (gtfifodbg) {
6456 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
6457 I915_WRITE(GTFIFODBG, gtfifodbg);
6458 }
6460 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6462 /* disable the counters and set deterministic thresholds */
6463 I915_WRITE(GEN6_RC_CONTROL, 0);
6465 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6466 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6467 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6468 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6469 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6471 for_each_engine(engine, dev_priv, id)
6472 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6474 I915_WRITE(GEN6_RC_SLEEP, 0);
6475 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6476 if (IS_IVYBRIDGE(dev_priv))
6477 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
6478 else
6479 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6480 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
6481 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6483 /* Check if we are enabling RC6 */
6484 rc6_mode = intel_enable_rc6();
6485 if (rc6_mode & INTEL_RC6_ENABLE)
6486 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
6488 /* We don't use those on Haswell */
6489 if (!IS_HASWELL(dev_priv)) {
6490 if (rc6_mode & INTEL_RC6p_ENABLE)
6491 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
6493 if (rc6_mode & INTEL_RC6pp_ENABLE)
6494 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
6495 }
6497 intel_print_rc6_info(dev_priv, rc6_mask);
6499 I915_WRITE(GEN6_RC_CONTROL,
6500 rc6_mask |
6501 GEN6_RC_CTL_EI_MODE(1) |
6502 GEN6_RC_CTL_HW_ENABLE);
6504 /* Power down if completely idle for over 50ms */
6505 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
6506 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6508 reset_rps(dev_priv, gen6_set_rps);
6510 rc6vids = 0;
6511 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
6512 if (IS_GEN6(dev_priv) && ret) {
6513 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
6514 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
6515 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
6516 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
6517 rc6vids &= 0xffff00;
6518 rc6vids |= GEN6_ENCODE_RC6_VID(450);
6519 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
6521 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
6524 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
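/*
 * gen6_update_ring_freq() below fills in the PCU's GPU-to-ring/IA
 * frequency table: roughly, for every GPU frequency step it picks a
 * ring (and, on pre-HSW parts, an IA) frequency so memory bandwidth
 * keeps up with the render clock, and writes each pairing via
 * GEN6_PCODE_WRITE_MIN_FREQ_TABLE.
 */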
6527 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6529 int min_freq = 15;
6530 unsigned int gpu_freq;
6531 unsigned int max_ia_freq, min_ring_freq;
6532 unsigned int max_gpu_freq, min_gpu_freq;
6533 int scaling_factor = 180;
6534 struct cpufreq_policy *policy;
6536 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6538 policy = cpufreq_cpu_get(0);
6539 if (policy) {
6540 max_ia_freq = policy->cpuinfo.max_freq;
6541 cpufreq_cpu_put(policy);
6542 } else {
6543 /*
6544 * Default to measured freq if none found, PCU will ensure we
6545 * don't go over
6546 */
6547 max_ia_freq = tsc_khz;
6548 }
6550 /* Convert from kHz to MHz */
6551 max_ia_freq /= 1000;
6553 min_ring_freq = I915_READ(DCLK) & 0xf;
6554 /* convert DDR frequency from units of 266.6MHz to bandwidth */
6555 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
6557 if (IS_GEN9_BC(dev_priv)) {
6558 /* Convert GT frequency to 50 HZ units */
6559 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
6560 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
6561 } else {
6562 min_gpu_freq = dev_priv->rps.min_freq;
6563 max_gpu_freq = dev_priv->rps.max_freq;
6564 }
6566 /*
6567 * For each potential GPU frequency, load a ring frequency we'd like
6568 * to use for memory access. We do this by specifying the IA frequency
6569 * the PCU should use as a reference to determine the ring frequency.
6570 */
6571 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
6572 int diff = max_gpu_freq - gpu_freq;
6573 unsigned int ia_freq = 0, ring_freq = 0;
6575 if (IS_GEN9_BC(dev_priv)) {
6576 /*
6577 * ring_freq = 2 * GT. ring_freq is in 100MHz units
6578 * No floor required for ring frequency on SKL.
6579 */
6580 ring_freq = gpu_freq;
6581 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
6582 /* max(2 * GT, DDR). NB: GT is 50MHz units */
6583 ring_freq = max(min_ring_freq, gpu_freq);
6584 } else if (IS_HASWELL(dev_priv)) {
6585 ring_freq = mult_frac(gpu_freq, 5, 4);
6586 ring_freq = max(min_ring_freq, ring_freq);
6587 /* leave ia_freq as the default, chosen by cpufreq */
6588 } else {
6589 /* On older processors, there is no separate ring
6590 * clock domain, so in order to boost the bandwidth
6591 * of the ring, we need to upclock the CPU (ia_freq).
6592 *
6593 * For GPU frequencies less than 750MHz,
6594 * just use the lowest ring freq.
6595 */
6596 if (gpu_freq < min_freq)
6597 ia_freq = 800;
6598 else
6599 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
6600 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
6601 }
6603 sandybridge_pcode_write(dev_priv,
6604 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
6605 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
6606 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
6607 gpu_freq);
6608 }
6611 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
6613 u32 val, rp0;
6615 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6617 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
6618 case 8:
6619 /* (2 * 4) config */
6620 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
6621 break;
6622 case 12:
6623 /* (2 * 6) config */
6624 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
6625 break;
6626 case 16:
6627 /* (2 * 8) config */
6628 default:
6629 /* Setting (2 * 8) Min RP0 for any other combination */
6630 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
6631 break;
6632 }
6634 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
6636 return rp0;
6639 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6641 u32 val, rpe;
6643 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
6644 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
6646 return rpe;
6649 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
6651 u32 val, rp1;
6653 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6654 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
6656 return rp1;
6659 static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
6661 u32 val, rpn;
6663 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
6664 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
6665 FB_GFX_FREQ_FUSE_MASK);
6667 return rpn;
6670 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
6672 u32 val, rp1;
6674 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6676 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
6678 return rp1;
6681 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
6683 u32 val, rp0;
6685 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
6687 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
6688 /* Clamp to max */
6689 rp0 = min_t(u32, rp0, 0xea);
6691 return rp0;
6694 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
6696 u32 val, rpe;
6698 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
6699 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
6700 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
6701 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
6703 return rpe;
6706 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
6710 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
6711 /*
6712 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
6713 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
6714 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
6715 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
6716 * to make sure it matches what Punit accepts.
6717 */
6718 return max_t(u32, val, 0xc0);
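/*
 * The helpers above recover RP0/RPe/RP1/RPn for VLV/CHV from Punit and
 * sideband fuses. The raw values are frequency codes, not MHz; the init
 * paths below convert them with intel_gpu_freq() purely for logging.
 */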
6721 /* Check that the pctx buffer wasn't moved under us. */
6722 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
6724 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6726 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
6727 dev_priv->vlv_pctx->stolen->start);
6731 /* Check that the pcbr address is not empty. */
6732 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
6734 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6736 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
6739 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
6741 struct i915_ggtt *ggtt = &dev_priv->ggtt;
6742 unsigned long pctx_paddr, paddr;
6743 u32 pcbr;
6744 int pctx_size = 32*1024;
6746 pcbr = I915_READ(VLV_PCBR);
6747 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
6748 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
6749 paddr = (dev_priv->mm.stolen_base +
6750 (ggtt->stolen_size - pctx_size));
6752 pctx_paddr = (paddr & (~4095));
6753 I915_WRITE(VLV_PCBR, pctx_paddr);
6754 }
6756 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
6759 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
6761 struct drm_i915_gem_object *pctx;
6762 unsigned long pctx_paddr;
6763 u32 pcbr;
6764 int pctx_size = 24*1024;
6766 pcbr = I915_READ(VLV_PCBR);
6767 if (pcbr) {
6768 /* BIOS set it up already, grab the pre-alloc'd space */
6769 int pcbr_offset;
6771 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
6772 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
6773 pcbr_offset,
6774 I915_GTT_OFFSET_NONE,
6775 pctx_size);
6776 goto out;
6777 }
6779 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
6781 /*
6782 * From the Gunit register HAS:
6783 * The Gfx driver is expected to program this register and ensure
6784 * proper allocation within Gfx stolen memory. For example, this
6785 * register should be programmed such than the PCBR range does not
6786 * overlap with other ranges, such as the frame buffer, protected
6787 * memory, or any other relevant ranges.
6788 */
6789 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
6791 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
6795 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
6796 I915_WRITE(VLV_PCBR, pctx_paddr);
6799 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
6800 dev_priv->vlv_pctx = pctx;
6803 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
6805 if (WARN_ON(!dev_priv->vlv_pctx))
6806 return;
6808 i915_gem_object_put(dev_priv->vlv_pctx);
6809 dev_priv->vlv_pctx = NULL;
6812 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
6814 dev_priv->rps.gpll_ref_freq =
6815 vlv_get_cck_clock(dev_priv, "GPLL ref",
6816 CCK_GPLL_CLOCK_CONTROL,
6817 dev_priv->czclk_freq);
6819 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
6820 dev_priv->rps.gpll_ref_freq);
6823 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
6825 u32 val;
6827 valleyview_setup_pctx(dev_priv);
6829 vlv_init_gpll_ref_freq(dev_priv);
6831 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6832 switch ((val >> 6) & 3) {
6833 case 0:
6834 case 1:
6835 dev_priv->mem_freq = 800;
6836 break;
6837 case 2:
6838 dev_priv->mem_freq = 1066;
6839 break;
6840 case 3:
6841 dev_priv->mem_freq = 1333;
6842 break;
6843 }
6844 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
6846 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
6847 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
6848 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
6849 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
6850 dev_priv->rps.max_freq);
6852 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
6853 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
6854 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
6855 dev_priv->rps.efficient_freq);
6857 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
6858 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
6859 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
6860 dev_priv->rps.rp1_freq);
6862 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
6863 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
6864 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
6865 dev_priv->rps.min_freq);
6868 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
6870 u32 val;
6872 cherryview_setup_pctx(dev_priv);
6874 vlv_init_gpll_ref_freq(dev_priv);
6876 mutex_lock(&dev_priv->sb_lock);
6877 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
6878 mutex_unlock(&dev_priv->sb_lock);
6880 switch ((val >> 2) & 0x7) {
6881 case 3:
6882 dev_priv->mem_freq = 2000;
6883 break;
6884 default:
6885 dev_priv->mem_freq = 1600;
6886 break;
6887 }
6888 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
6890 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
6891 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
6892 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
6893 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
6894 dev_priv->rps.max_freq);
6896 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
6897 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
6898 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
6899 dev_priv->rps.efficient_freq);
6901 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
6902 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
6903 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
6904 dev_priv->rps.rp1_freq);
6906 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
6907 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
6908 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
6909 dev_priv->rps.min_freq);
6911 WARN_ONCE((dev_priv->rps.max_freq |
6912 dev_priv->rps.efficient_freq |
6913 dev_priv->rps.rp1_freq |
6914 dev_priv->rps.min_freq) & 1,
6915 "Odd GPU freq values\n");
6918 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6920 valleyview_cleanup_pctx(dev_priv);
6923 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
6925 struct intel_engine_cs *engine;
6926 enum intel_engine_id id;
6927 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
6929 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6931 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
6932 GT_FIFO_FREE_ENTRIES_CHV);
6933 if (gtfifodbg) {
6934 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6935 gtfifodbg);
6936 I915_WRITE(GTFIFODBG, gtfifodbg);
6937 }
6939 cherryview_check_pctx(dev_priv);
6941 /* 1a & 1b: Get forcewake during program sequence. Although the driver
6942 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6943 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6945 /* Disable RC states. */
6946 I915_WRITE(GEN6_RC_CONTROL, 0);
6948 /* 2a: Program RC6 thresholds.*/
6949 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6950 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6951 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6953 for_each_engine(engine, dev_priv, id)
6954 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6955 I915_WRITE(GEN6_RC_SLEEP, 0);
6957 /* TO threshold set to 500 us (0x186 * 1.28 us) */
6958 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
6960 /* allows RC6 residency counter to work */
6961 I915_WRITE(VLV_COUNTER_CONTROL,
6962 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
6963 VLV_MEDIA_RC6_COUNT_EN |
6964 VLV_RENDER_RC6_COUNT_EN));
6966 /* For now we assume BIOS is allocating and populating the PCBR */
6967 pcbr = I915_READ(VLV_PCBR);
6969 /* 3: Enable RC6 */
6970 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
6971 (pcbr >> VLV_PCBR_ADDR_SHIFT))
6972 rc6_mode = GEN7_RC_CTL_TO_MODE;
6974 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6976 /* 4 Program defaults and thresholds for RPS*/
6977 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6978 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6979 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6980 I915_WRITE(GEN6_RP_UP_EI, 66000);
6981 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6983 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6985 /* 5: Enable RPS */
6986 I915_WRITE(GEN6_RP_CONTROL,
6987 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6988 GEN6_RP_MEDIA_IS_GFX |
6989 GEN6_RP_ENABLE |
6990 GEN6_RP_UP_BUSY_AVG |
6991 GEN6_RP_DOWN_IDLE_AVG);
6993 /* Setting Fixed Bias */
6994 val = VLV_OVERRIDE_EN |
6995 VLV_SOC_TDP_EN |
6996 CHV_BIAS_CPU_50_SOC_50;
6997 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6999 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7001 /* RPS code assumes GPLL is used */
7002 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7004 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7005 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7007 reset_rps(dev_priv, valleyview_set_rps);
7009 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7012 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
7014 struct intel_engine_cs *engine;
7015 enum intel_engine_id id;
7016 u32 gtfifodbg, val, rc6_mode = 0;
7018 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7020 valleyview_check_pctx(dev_priv);
7022 gtfifodbg = I915_READ(GTFIFODBG);
7023 if (gtfifodbg) {
7024 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
7025 gtfifodbg);
7026 I915_WRITE(GTFIFODBG, gtfifodbg);
7027 }
7029 /* If VLV, Forcewake all wells, else re-direct to regular path */
7030 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
7032 /* Disable RC states. */
7033 I915_WRITE(GEN6_RC_CONTROL, 0);
7035 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
7036 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
7037 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
7038 I915_WRITE(GEN6_RP_UP_EI, 66000);
7039 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
7041 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
7043 I915_WRITE(GEN6_RP_CONTROL,
7044 GEN6_RP_MEDIA_TURBO |
7045 GEN6_RP_MEDIA_HW_NORMAL_MODE |
7046 GEN6_RP_MEDIA_IS_GFX |
7047 GEN6_RP_ENABLE |
7048 GEN6_RP_UP_BUSY_AVG |
7049 GEN6_RP_DOWN_IDLE_CONT);
7051 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
7052 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
7053 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
7055 for_each_engine(engine, dev_priv, id)
7056 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
7058 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
7060 /* allows RC6 residency counter to work */
7061 I915_WRITE(VLV_COUNTER_CONTROL,
7062 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
7063 VLV_MEDIA_RC0_COUNT_EN |
7064 VLV_RENDER_RC0_COUNT_EN |
7065 VLV_MEDIA_RC6_COUNT_EN |
7066 VLV_RENDER_RC6_COUNT_EN));
7068 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
7069 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
7071 intel_print_rc6_info(dev_priv, rc6_mode);
7073 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
7075 /* Setting Fixed Bias */
7076 val = VLV_OVERRIDE_EN |
7077 VLV_SOC_TDP_EN |
7078 VLV_BIAS_CPU_125_SOC_875;
7079 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
7081 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
7083 /* RPS code assumes GPLL is used */
7084 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
7086 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
7087 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
7089 reset_rps(dev_priv, valleyview_set_rps);
7091 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
7094 static unsigned long intel_pxfreq(u32 vidfreq)
7096 unsigned long freq;
7097 int div = (vidfreq & 0x3f0000) >> 16;
7098 int post = (vidfreq & 0x3000) >> 12;
7099 int pre = (vidfreq & 0x7);
7101 if (!pre)
7102 return 0;
7104 freq = ((div * 133333) / ((1<<post) * pre));
7106 return freq;
7109 static const struct cparams {
7110 u16 i;
7111 u16 t;
7112 u16 m;
7113 u16 c;
7114 } cparams[] = {
7115 { 1, 1333, 301, 28664 },
7116 { 1, 1066, 294, 24460 },
7117 { 1, 800, 294, 25192 },
7118 { 0, 1333, 276, 27605 },
7119 { 0, 1066, 276, 27605 },
7120 { 0, 800, 231, 23784 },
7121 };
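/*
 * Each cparams entry maps an (i, t) pair -- the ips.c_m memory
 * configuration and ips.r_t memory speed recorded at init -- to the
 * (m, c) slope/intercept used by __i915_chipset_val() below.
 */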
7123 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
7125 u64 total_count, diff, ret;
7126 u32 count1, count2, count3, m = 0, c = 0;
7127 unsigned long now = jiffies_to_msecs(jiffies), diff1;
7128 int i;
7130 lockdep_assert_held(&mchdev_lock);
7132 diff1 = now - dev_priv->ips.last_time1;
7134 /* Prevent division-by-zero if we are asking too fast.
7135 * Also, we don't get interesting results if we are polling
7136 * faster than once in 10ms, so just return the saved value
7137 * in such cases.
7138 */
7139 if (diff1 <= 10)
7140 return dev_priv->ips.chipset_power;
7142 count1 = I915_READ(DMIEC);
7143 count2 = I915_READ(DDREC);
7144 count3 = I915_READ(CSIEC);
7146 total_count = count1 + count2 + count3;
7148 /* FIXME: handle per-counter overflow */
7149 if (total_count < dev_priv->ips.last_count1) {
7150 diff = ~0UL - dev_priv->ips.last_count1;
7151 diff += total_count;
7152 } else {
7153 diff = total_count - dev_priv->ips.last_count1;
7154 }
7156 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
7157 if (cparams[i].i == dev_priv->ips.c_m &&
7158 cparams[i].t == dev_priv->ips.r_t) {
7159 m = cparams[i].m;
7160 c = cparams[i].c;
7161 break;
7162 }
7163 }
7165 diff = div_u64(diff, diff1);
7166 ret = ((m * diff) + c);
7167 ret = div_u64(ret, 10);
7169 dev_priv->ips.last_count1 = total_count;
7170 dev_priv->ips.last_time1 = now;
7172 dev_priv->ips.chipset_power = ret;
7174 return ret;
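/*
 * In effect the estimate above is a linear fit: chipset power is
 * roughly (m * events_per_ms + c) / 10, with m and c chosen from
 * cparams[] to match the platform's memory configuration.
 */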
7177 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
7179 unsigned long val;
7181 if (INTEL_INFO(dev_priv)->gen != 5)
7182 return 0;
7184 spin_lock_irq(&mchdev_lock);
7186 val = __i915_chipset_val(dev_priv);
7188 spin_unlock_irq(&mchdev_lock);
7190 return val;
7193 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
7195 unsigned long m, x, b;
7196 u32 tsfs;
7198 tsfs = I915_READ(TSFS);
7200 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
7201 x = I915_READ8(TR1);
7203 b = tsfs & TSFS_INTR_MASK;
7205 return ((m * x) / 127) - b;
7208 static int _pxvid_to_vd(u8 pxvid)
7210 if (pxvid == 0)
7211 return 0;
7213 if (pxvid >= 8 && pxvid < 31)
7214 pxvid = 31;
7216 return (pxvid + 2) * 125;
7219 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
7221 const int vd = _pxvid_to_vd(pxvid);
7222 const int vm = vd - 1125;
7224 if (INTEL_INFO(dev_priv)->is_mobile)
7225 return vm > 0 ? vm : 0;
7227 return vd;
7230 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
7232 u64 now, diff, diffms;
7233 u32 count;
7235 lockdep_assert_held(&mchdev_lock);
7237 now = ktime_get_raw_ns();
7238 diffms = now - dev_priv->ips.last_time2;
7239 do_div(diffms, NSEC_PER_MSEC);
7241 /* Don't divide by 0 */
7242 if (!diffms)
7243 return;
7245 count = I915_READ(GFXEC);
7247 if (count < dev_priv->ips.last_count2) {
7248 diff = ~0UL - dev_priv->ips.last_count2;
7249 diff += count;
7250 } else {
7251 diff = count - dev_priv->ips.last_count2;
7252 }
7254 dev_priv->ips.last_count2 = count;
7255 dev_priv->ips.last_time2 = now;
7257 /* More magic constants... */
7258 diff = diff * 1181;
7259 diff = div_u64(diff, diffms * 10);
7260 dev_priv->ips.gfx_power = diff;
7263 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
7265 if (INTEL_INFO(dev_priv)->gen != 5)
7266 return;
7268 spin_lock_irq(&mchdev_lock);
7270 __i915_update_gfx_val(dev_priv);
7272 spin_unlock_irq(&mchdev_lock);
7275 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
7277 unsigned long t, corr, state1, corr2, state2;
7278 u32 pxvid, ext_v;
7280 lockdep_assert_held(&mchdev_lock);
7282 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
7283 pxvid = (pxvid >> 24) & 0x7f;
7284 ext_v = pvid_to_extvid(dev_priv, pxvid);
7286 state1 = ext_v;
7288 t = i915_mch_val(dev_priv);
7290 /* Revel in the empirically derived constants */
7292 /* Correction factor in 1/100000 units */
7293 if (t > 80)
7294 corr = ((t * 2349) + 135940);
7295 else if (t >= 50)
7296 corr = ((t * 964) + 29317);
7297 else /* < 50 */
7298 corr = ((t * 301) + 1004);
7300 corr = corr * ((150142 * state1) / 10000 - 78642);
7301 corr /= 100000;
7302 corr2 = (corr * dev_priv->ips.corr);
7304 state2 = (corr2 * state1) / 10000;
7305 state2 /= 100; /* convert to mW */
7307 __i915_update_gfx_val(dev_priv);
7309 return dev_priv->ips.gfx_power + state2;
7312 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
7314 unsigned long val;
7316 if (INTEL_INFO(dev_priv)->gen != 5)
7317 return 0;
7319 spin_lock_irq(&mchdev_lock);
7321 val = __i915_gfx_val(dev_priv);
7323 spin_unlock_irq(&mchdev_lock);
7325 return val;
7328 /**
7329 * i915_read_mch_val - return value for IPS use
7330 *
7331 * Calculate and return a value for the IPS driver to use when deciding whether
7332 * we have thermal and power headroom to increase CPU or GPU power budget.
7333 */
7334 unsigned long i915_read_mch_val(void)
7336 struct drm_i915_private *dev_priv;
7337 unsigned long chipset_val, graphics_val, ret = 0;
7339 spin_lock_irq(&mchdev_lock);
7340 if (!i915_mch_dev)
7341 goto out_unlock;
7342 dev_priv = i915_mch_dev;
7344 chipset_val = __i915_chipset_val(dev_priv);
7345 graphics_val = __i915_gfx_val(dev_priv);
7347 ret = chipset_val + graphics_val;
7349 out_unlock:
7350 spin_unlock_irq(&mchdev_lock);
7352 return ret;
7354 EXPORT_SYMBOL_GPL(i915_read_mch_val);
7356 /**
7357 * i915_gpu_raise - raise GPU frequency limit
7358 *
7359 * Raise the limit; IPS indicates we have thermal headroom.
7360 */
7361 bool i915_gpu_raise(void)
7363 struct drm_i915_private *dev_priv;
7364 bool ret = true;
7366 spin_lock_irq(&mchdev_lock);
7367 if (!i915_mch_dev) {
7368 ret = false;
7369 goto out_unlock;
7370 }
7371 dev_priv = i915_mch_dev;
7373 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
7374 dev_priv->ips.max_delay--;
7376 out_unlock:
7377 spin_unlock_irq(&mchdev_lock);
7379 return ret;
7381 EXPORT_SYMBOL_GPL(i915_gpu_raise);
7383 /**
7384 * i915_gpu_lower - lower GPU frequency limit
7385 *
7386 * IPS indicates we're close to a thermal limit, so throttle back the GPU
7387 * frequency maximum.
7388 */
7389 bool i915_gpu_lower(void)
7391 struct drm_i915_private *dev_priv;
7392 bool ret = true;
7394 spin_lock_irq(&mchdev_lock);
7395 if (!i915_mch_dev) {
7396 ret = false;
7397 goto out_unlock;
7398 }
7399 dev_priv = i915_mch_dev;
7401 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
7402 dev_priv->ips.max_delay++;
7404 out_unlock:
7405 spin_unlock_irq(&mchdev_lock);
7407 return ret;
7409 EXPORT_SYMBOL_GPL(i915_gpu_lower);
7411 /**
7412 * i915_gpu_busy - indicate GPU busyness to IPS
7413 *
7414 * Tell the IPS driver whether or not the GPU is busy.
7415 */
7416 bool i915_gpu_busy(void)
7418 bool ret = false;
7420 spin_lock_irq(&mchdev_lock);
7421 if (i915_mch_dev)
7422 ret = i915_mch_dev->gt.awake;
7423 spin_unlock_irq(&mchdev_lock);
7425 return ret;
7427 EXPORT_SYMBOL_GPL(i915_gpu_busy);
7429 /**
7430 * i915_gpu_turbo_disable - disable graphics turbo
7431 *
7432 * Disable graphics turbo by resetting the max frequency and setting the
7433 * current frequency to the default.
7434 */
7435 bool i915_gpu_turbo_disable(void)
7437 struct drm_i915_private *dev_priv;
7438 bool ret = true;
7440 spin_lock_irq(&mchdev_lock);
7441 if (!i915_mch_dev) {
7442 ret = false;
7443 goto out_unlock;
7444 }
7445 dev_priv = i915_mch_dev;
7447 dev_priv->ips.max_delay = dev_priv->ips.fstart;
7449 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
7450 ret = false;
7452 out_unlock:
7453 spin_unlock_irq(&mchdev_lock);
7455 return ret;
7457 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
7459 /*
7460 * Tells the intel_ips driver that the i915 driver is now loaded, if
7461 * IPS got loaded first.
7463 * This awkward dance is so that neither module has to depend on the
7464 * other in order for IPS to do the appropriate communication of
7465 * GPU turbo limits to i915.
7466 */
7467 static void
7468 ips_ping_for_i915_load(void)
7470 void (*link)(void);
7472 link = symbol_get(ips_link_to_i915_driver);
7473 if (link) {
7474 link();
7475 symbol_put(ips_link_to_i915_driver);
7476 }
7479 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
7481 /* We only register the i915 ips part with intel-ips once everything is
7482 * set up, to avoid intel-ips sneaking in and reading bogus values. */
7483 spin_lock_irq(&mchdev_lock);
7484 i915_mch_dev = dev_priv;
7485 spin_unlock_irq(&mchdev_lock);
7487 ips_ping_for_i915_load();
7490 void intel_gpu_ips_teardown(void)
7492 spin_lock_irq(&mchdev_lock);
7493 i915_mch_dev = NULL;
7494 spin_unlock_irq(&mchdev_lock);
7497 static void intel_init_emon(struct drm_i915_private *dev_priv)
7499 u32 lcfuse;
7500 u8 pxw[16];
7501 int i;
7503 /* Disable to program */
7504 I915_WRITE(ECR, 0);
7505 POSTING_READ(ECR);
7507 /* Program energy weights for various events */
7508 I915_WRITE(SDEW, 0x15040d00);
7509 I915_WRITE(CSIEW0, 0x007f0000);
7510 I915_WRITE(CSIEW1, 0x1e220004);
7511 I915_WRITE(CSIEW2, 0x04000004);
7513 for (i = 0; i < 5; i++)
7514 I915_WRITE(PEW(i), 0);
7515 for (i = 0; i < 3; i++)
7516 I915_WRITE(DEW(i), 0);
7518 /* Program P-state weights to account for frequency power adjustment */
7519 for (i = 0; i < 16; i++) {
7520 u32 pxvidfreq = I915_READ(PXVFREQ(i));
7521 unsigned long freq = intel_pxfreq(pxvidfreq);
7522 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
7523 PXVFREQ_PX_SHIFT;
7524 unsigned long val;
7526 val = vid * vid;
7527 val *= (freq / 1000);
7528 val *= 255;
7529 val /= (127*127*900);
7530 if (val > 0xff)
7531 DRM_ERROR("bad pxval: %ld\n", val);
7532 pxw[i] = val;
7533 }
7534 /* Render standby states get 0 weight */
7535 pxw[14] = 0;
7536 pxw[15] = 0;
7538 for (i = 0; i < 4; i++) {
7539 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
7540 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
7541 I915_WRITE(PXW(i), val);
7542 }
7544 /* Adjust magic regs to magic values (more experimental results) */
7545 I915_WRITE(OGW0, 0);
7546 I915_WRITE(OGW1, 0);
7547 I915_WRITE(EG0, 0x00007f00);
7548 I915_WRITE(EG1, 0x0000000e);
7549 I915_WRITE(EG2, 0x000e0000);
7550 I915_WRITE(EG3, 0x68000300);
7551 I915_WRITE(EG4, 0x42000000);
7552 I915_WRITE(EG5, 0x00140031);
7553 I915_WRITE(EG6, 0);
7554 I915_WRITE(EG7, 0);
7556 for (i = 0; i < 8; i++)
7557 I915_WRITE(PXWL(i), 0);
7559 /* Enable PMON + select events */
7560 I915_WRITE(ECR, 0x80000019);
7562 lcfuse = I915_READ(LCFUSE02);
7564 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
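/*
 * Summary: intel_init_emon() programs the energy-weight registers for
 * the ILK power meter, derives a per-P-state weight from each PXVFREQ
 * entry, and finally enables PMON; the LCFUSE-derived ips.corr factor
 * feeds the __i915_gfx_val() estimate above.
 */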
7567 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7569 /*
7570 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
7571 * requirement.
7572 */
7573 if (!i915.enable_rc6) {
7574 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7575 intel_runtime_pm_get(dev_priv);
7576 }
7578 mutex_lock(&dev_priv->drm.struct_mutex);
7579 mutex_lock(&dev_priv->rps.hw_lock);
7581 /* Initialize RPS limits (for userspace) */
7582 if (IS_CHERRYVIEW(dev_priv))
7583 cherryview_init_gt_powersave(dev_priv);
7584 else if (IS_VALLEYVIEW(dev_priv))
7585 valleyview_init_gt_powersave(dev_priv);
7586 else if (INTEL_GEN(dev_priv) >= 6)
7587 gen6_init_rps_frequencies(dev_priv);
7589 /* Derive initial user preferences/limits from the hardware limits */
7590 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
7591 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
7593 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
7594 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
7596 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7597 dev_priv->rps.min_freq_softlimit =
7598 max_t(int,
7599 dev_priv->rps.efficient_freq,
7600 intel_freq_opcode(dev_priv, 450));
7602 /* After setting max-softlimit, find the overclock max freq */
7603 if (IS_GEN6(dev_priv) ||
7604 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
7605 u32 params = 0;
7607 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
7608 if (params & BIT(31)) { /* OC supported */
7609 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
7610 (dev_priv->rps.max_freq & 0xff) * 50,
7611 (params & 0xff) * 50);
7612 dev_priv->rps.max_freq = params & 0xff;
7613 }
7614 }
7616 /* Finally allow us to boost to max by default */
7617 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
7619 mutex_unlock(&dev_priv->rps.hw_lock);
7620 mutex_unlock(&dev_priv->drm.struct_mutex);
7622 intel_autoenable_gt_powersave(dev_priv);
7625 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7627 if (IS_VALLEYVIEW(dev_priv))
7628 valleyview_cleanup_gt_powersave(dev_priv);
7630 if (!i915.enable_rc6)
7631 intel_runtime_pm_put(dev_priv);
7634 /**
7635 * intel_suspend_gt_powersave - suspend PM work and helper threads
7636 * @dev_priv: i915 device
7637 *
7638 * We don't want to disable RC6 or other features here, we just want
7639 * to make sure any work we've queued has finished and won't bother
7640 * us while we're suspended.
7641 */
7642 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
7644 if (INTEL_GEN(dev_priv) < 6)
7645 return;
7647 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
7648 intel_runtime_pm_put(dev_priv);
7650 /* gen6_rps_idle() will be called later to disable interrupts */
7653 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7655 dev_priv->rps.enabled = true; /* force disabling */
7656 intel_disable_gt_powersave(dev_priv);
7658 gen6_reset_rps_interrupts(dev_priv);
7661 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
7663 if (!READ_ONCE(dev_priv->rps.enabled))
7664 return;
7666 mutex_lock(&dev_priv->rps.hw_lock);
7668 if (INTEL_GEN(dev_priv) >= 9) {
7669 gen9_disable_rc6(dev_priv);
7670 gen9_disable_rps(dev_priv);
7671 } else if (IS_CHERRYVIEW(dev_priv)) {
7672 cherryview_disable_rps(dev_priv);
7673 } else if (IS_VALLEYVIEW(dev_priv)) {
7674 valleyview_disable_rps(dev_priv);
7675 } else if (INTEL_GEN(dev_priv) >= 6) {
7676 gen6_disable_rps(dev_priv);
7677 } else if (IS_IRONLAKE_M(dev_priv)) {
7678 ironlake_disable_drps(dev_priv);
7679 }
7681 dev_priv->rps.enabled = false;
7682 mutex_unlock(&dev_priv->rps.hw_lock);
7685 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
7687 /* We shouldn't be disabling as we submit, so this should be less
7688 * racy than it appears!
7689 */
7690 if (READ_ONCE(dev_priv->rps.enabled))
7691 return;
7693 /* Powersaving is controlled by the host when inside a VM */
7694 if (intel_vgpu_active(dev_priv))
7695 return;
7697 mutex_lock(&dev_priv->rps.hw_lock);
7699 if (IS_CHERRYVIEW(dev_priv)) {
7700 cherryview_enable_rps(dev_priv);
7701 } else if (IS_VALLEYVIEW(dev_priv)) {
7702 valleyview_enable_rps(dev_priv);
7703 } else if (INTEL_GEN(dev_priv) >= 9) {
7704 gen9_enable_rc6(dev_priv);
7705 gen9_enable_rps(dev_priv);
7706 if (IS_GEN9_BC(dev_priv))
7707 gen6_update_ring_freq(dev_priv);
7708 } else if (IS_BROADWELL(dev_priv)) {
7709 gen8_enable_rps(dev_priv);
7710 gen6_update_ring_freq(dev_priv);
7711 } else if (INTEL_GEN(dev_priv) >= 6) {
7712 gen6_enable_rps(dev_priv);
7713 gen6_update_ring_freq(dev_priv);
7714 } else if (IS_IRONLAKE_M(dev_priv)) {
7715 ironlake_enable_drps(dev_priv);
7716 intel_init_emon(dev_priv);
7717 }
7719 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
7720 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
7722 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
7723 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
7725 dev_priv->rps.enabled = true;
7726 mutex_unlock(&dev_priv->rps.hw_lock);
7729 static void __intel_autoenable_gt_powersave(struct work_struct *work)
7731 struct drm_i915_private *dev_priv =
7732 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
7733 struct intel_engine_cs *rcs;
7734 struct drm_i915_gem_request *req;
7736 if (READ_ONCE(dev_priv->rps.enabled))
7737 goto out;
7739 rcs = dev_priv->engine[RCS];
7740 if (rcs->last_retired_context)
7741 goto out;
7743 if (!rcs->init_context)
7744 goto out;
7746 mutex_lock(&dev_priv->drm.struct_mutex);
7748 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
7749 if (IS_ERR(req))
7750 goto unlock;
7752 if (!i915.enable_execlists && i915_switch_context(req) == 0)
7753 rcs->init_context(req);
7755 /* Mark the device busy, calling intel_enable_gt_powersave() */
7756 i915_add_request(req);
7758 unlock:
7759 mutex_unlock(&dev_priv->drm.struct_mutex);
7760 out:
7761 intel_runtime_pm_put(dev_priv);
7764 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
7766 if (READ_ONCE(dev_priv->rps.enabled))
7767 return;
7769 if (IS_IRONLAKE_M(dev_priv)) {
7770 ironlake_enable_drps(dev_priv);
7771 intel_init_emon(dev_priv);
7772 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
7773 /*
7774 * PCU communication is slow and this doesn't need to be
7775 * done at any specific time, so do this out of our fast path
7776 * to make resume and init faster.
7777 *
7778 * We depend on the HW RC6 power context save/restore
7779 * mechanism when entering D3 through runtime PM suspend. So
7780 * disable RPM until RPS/RC6 is properly setup. We can only
7781 * get here via the driver load/system resume/runtime resume
7782 * paths, so the _noresume version is enough (and in case of
7783 * runtime resume it's necessary).
7784 */
7785 if (queue_delayed_work(dev_priv->wq,
7786 &dev_priv->rps.autoenable_work,
7787 round_jiffies_up_relative(HZ)))
7788 intel_runtime_pm_get_noresume(dev_priv);
7792 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
7794 /*
7795 * On Ibex Peak and Cougar Point, we need to disable clock
7796 * gating for the panel power sequencer or it will fail to
7797 * start up when no ports are active.
7798 */
7799 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7802 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
7804 enum pipe pipe;
7806 for_each_pipe(dev_priv, pipe) {
7807 I915_WRITE(DSPCNTR(pipe),
7808 I915_READ(DSPCNTR(pipe)) |
7809 DISPPLANE_TRICKLE_FEED_DISABLE);
7811 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
7812 POSTING_READ(DSPSURF(pipe));
7813 }
7816 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
7818 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
7819 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
7820 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
7822 /*
7823 * Don't touch WM1S_LP_EN here.
7824 * Doing so could cause underruns.
7825 */
7828 static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
7830 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7832 /*
7833 * Required for FBC
7834 * WaFbcDisableDpfcClockGating:ilk
7835 */
7836 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
7837 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
7838 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
7840 I915_WRITE(PCH_3DCGDIS0,
7841 MARIUNIT_CLOCK_GATE_DISABLE |
7842 SVSMUNIT_CLOCK_GATE_DISABLE);
7843 I915_WRITE(PCH_3DCGDIS1,
7844 VFMUNIT_CLOCK_GATE_DISABLE);
7846 /*
7847 * According to the spec the following bits should be set in
7848 * order to enable memory self-refresh
7849 * The bit 22/21 of 0x42004
7850 * The bit 5 of 0x42020
7851 * The bit 15 of 0x45000
7852 */
7853 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7854 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7855 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7856 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
7857 I915_WRITE(DISP_ARB_CTL,
7858 (I915_READ(DISP_ARB_CTL) |
7859 DISP_FBC_WM_DIS));
7861 ilk_init_lp_watermarks(dev_priv);
7863 /*
7864 * Based on the document from hardware guys the following bits
7865 * should be set unconditionally in order to enable FBC.
7866 * The bit 22 of 0x42000
7867 * The bit 22 of 0x42004
7868 * The bit 7,8,9 of 0x42020.
7869 */
7870 if (IS_IRONLAKE_M(dev_priv)) {
7871 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
7872 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7873 I915_READ(ILK_DISPLAY_CHICKEN1) |
7874 ILK_FBCQ_DIS);
7875 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7876 I915_READ(ILK_DISPLAY_CHICKEN2) |
7877 ILK_DPARB_GATE);
7878 }
7880 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7882 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7883 I915_READ(ILK_DISPLAY_CHICKEN2) |
7884 ILK_ELPIN_409_SELECT);
7885 I915_WRITE(_3D_CHICKEN2,
7886 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
7887 _3D_CHICKEN2_WM_READ_PIPELINED);
7889 /* WaDisableRenderCachePipelinedFlush:ilk */
7890 I915_WRITE(CACHE_MODE_0,
7891 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7893 /* WaDisable_RenderCache_OperationalFlush:ilk */
7894 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7896 g4x_disable_trickle_feed(dev_priv);
7898 ibx_init_clock_gating(dev_priv);
7901 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
7903 int pipe;
7904 uint32_t val;
7906 /*
7907 * On Ibex Peak and Cougar Point, we need to disable clock
7908 * gating for the panel power sequencer or it will fail to
7909 * start up when no ports are active.
7910 */
7911 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
7912 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
7913 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7914 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7915 DPLS_EDP_PPS_FIX_DIS);
7916 /* The below fixes the weird display corruption, a few pixels shifted
7917 * downward, on (only) LVDS of some HP laptops with IVY.
7918 */
7919 for_each_pipe(dev_priv, pipe) {
7920 val = I915_READ(TRANS_CHICKEN2(pipe));
7921 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7922 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7923 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7924 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7925 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
7926 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7927 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7928 I915_WRITE(TRANS_CHICKEN2(pipe), val);
7929 }
7930 /* WADP0ClockGatingDisable */
7931 for_each_pipe(dev_priv, pipe) {
7932 I915_WRITE(TRANS_CHICKEN1(pipe),
7933 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7934 }
7937 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
7939 uint32_t tmp;
7941 tmp = I915_READ(MCH_SSKPD);
7942 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7943 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7947 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
7949 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7951 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7953 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7954 I915_READ(ILK_DISPLAY_CHICKEN2) |
7955 ILK_ELPIN_409_SELECT);
7957 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
7958 I915_WRITE(_3D_CHICKEN,
7959 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
7961 /* WaDisable_RenderCache_OperationalFlush:snb */
7962 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7964 /*
7965 * BSpec recommends 8x4 when MSAA is used,
7966 * however in practice 16x4 seems fastest.
7967 *
7968 * Note that PS/WM thread counts depend on the WIZ hashing
7969 * disable bit, which we don't touch here, but it's good
7970 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7971 */
7972 I915_WRITE(GEN6_GT_MODE,
7973 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7975 ilk_init_lp_watermarks(dev_priv);
7977 I915_WRITE(CACHE_MODE_0,
7978 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
7980 I915_WRITE(GEN6_UCGCTL1,
7981 I915_READ(GEN6_UCGCTL1) |
7982 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7983 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7985 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7986 * gating disable must be set. Failure to set it results in
7987 * flickering pixels due to Z write ordering failures after
7988 * some amount of runtime in the Mesa "fire" demo, and Unigine
7989 * Sanctuary and Tropics, and apparently anything else with
7990 * alpha test or pixel discard.
7991 *
7992 * According to the spec, bit 11 (RCCUNIT) must also be set,
7993 * but we didn't debug actual testcases to find it out.
7994 *
7995 * WaDisableRCCUnitClockGating:snb
7996 * WaDisableRCPBUnitClockGating:snb
7997 */
7998 I915_WRITE(GEN6_UCGCTL2,
7999 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8000 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8002 /* WaStripsFansDisableFastClipPerformanceFix:snb */
8003 I915_WRITE(_3D_CHICKEN3,
8004 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
8008 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
8009 * 3DSTATE_SF number of SF output attributes is more than 16."
8011 I915_WRITE(_3D_CHICKEN3,
8012 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
8014 /*
8015 * According to the spec the following bits should be
8016 * set in order to enable memory self-refresh and fbc:
8017 * The bit21 and bit22 of 0x42000
8018 * The bit21 and bit22 of 0x42004
8019 * The bit5 and bit7 of 0x42020
8020 * The bit14 of 0x70180
8021 * The bit14 of 0x71180
8022 *
8023 * WaFbcAsynchFlipDisableFbcQueue:snb
8024 */
8025 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8026 I915_READ(ILK_DISPLAY_CHICKEN1) |
8027 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8028 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8029 I915_READ(ILK_DISPLAY_CHICKEN2) |
8030 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8031 I915_WRITE(ILK_DSPCLK_GATE_D,
8032 I915_READ(ILK_DSPCLK_GATE_D) |
8033 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
8034 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
8036 g4x_disable_trickle_feed(dev_priv);
8038 cpt_init_clock_gating(dev_priv);
8040 gen6_check_mch_setup(dev_priv);
8043 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
8045 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
8047 /*
8048 * WaVSThreadDispatchOverride:ivb,vlv
8049 *
8050 * This actually overrides the dispatch
8051 * mode for all thread types.
8052 */
8053 reg &= ~GEN7_FF_SCHED_MASK;
8054 reg |= GEN7_FF_TS_SCHED_HW;
8055 reg |= GEN7_FF_VS_SCHED_HW;
8056 reg |= GEN7_FF_DS_SCHED_HW;
8058 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
8061 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
8063 /*
8064 * TODO: this bit should only be enabled when really needed, then
8065 * disabled when not needed anymore in order to save power.
8066 */
8067 if (HAS_PCH_LPT_LP(dev_priv))
8068 I915_WRITE(SOUTH_DSPCLK_GATE_D,
8069 I915_READ(SOUTH_DSPCLK_GATE_D) |
8070 PCH_LP_PARTITION_LEVEL_DISABLE);
8072 /* WADPOClockGatingDisable:hsw */
8073 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
8074 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
8075 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
8078 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
8080 if (HAS_PCH_LPT_LP(dev_priv)) {
8081 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
8083 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
8084 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8088 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
8089 int general_prio_credits,
8090 int high_prio_credits)
8092 u32 misccpctl;
8094 /* WaTempDisableDOPClkGating:bdw */
8095 misccpctl = I915_READ(GEN7_MISCCPCTL);
8096 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8098 I915_WRITE(GEN8_L3SQCREG1,
8099 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
8100 L3_HIGH_PRIO_CREDITS(high_prio_credits));
8102 /*
8103 * Wait at least 100 clocks before re-enabling clock gating.
8104 * See the definition of L3SQCREG1 in BSpec.
8105 */
8106 POSTING_READ(GEN8_L3SQCREG1);
8108 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
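/*
 * Usage example: broadwell_init_clock_gating() below calls
 * gen8_set_l3sqc_credits(dev_priv, 30, 2) to satisfy
 * WaProgramL3SqcReg1Default, keeping DOP clock gating off only for
 * the duration of the L3SQCREG1 write.
 */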
8111 static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
8113 gen9_init_clock_gating(dev_priv);
8115 /* WaDisableSDEUnitClockGating:kbl */
8116 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
8117 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
8118 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
8120 /* WaDisableGamClockGating:kbl */
8121 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
8122 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
8123 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
8125 /* WaFbcNukeOnHostModify:kbl */
8126 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8127 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8130 static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
8132 gen9_init_clock_gating(dev_priv);
8134 /* WAC6entrylatency:skl */
8135 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
8136 FBC_LLC_FULLY_OPEN);
8138 /* WaFbcNukeOnHostModify:skl */
8139 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
8140 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
8143 static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
8145 enum pipe pipe;
8147 ilk_init_lp_watermarks(dev_priv);
8149 /* WaSwitchSolVfFArbitrationPriority:bdw */
8150 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
8152 /* WaPsrDPAMaskVBlankInSRD:bdw */
8153 I915_WRITE(CHICKEN_PAR1_1,
8154 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
8156 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
8157 for_each_pipe(dev_priv, pipe) {
8158 I915_WRITE(CHICKEN_PIPESL_1(pipe),
8159 I915_READ(CHICKEN_PIPESL_1(pipe)) |
8160 BDW_DPRS_MASK_VBLANK_SRD);
8163 /* WaVSRefCountFullforceMissDisable:bdw */
8164 /* WaDSRefCountFullforceMissDisable:bdw */
8165 I915_WRITE(GEN7_FF_THREAD_MODE,
8166 I915_READ(GEN7_FF_THREAD_MODE) &
8167 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
8169 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
8170 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
8172 /* WaDisableSDEUnitClockGating:bdw */
8173 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
8174 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
8176 /* WaProgramL3SqcReg1Default:bdw */
8177 gen8_set_l3sqc_credits(dev_priv, 30, 2);
8179 /*
8180 * WaGttCachingOffByDefault:bdw
8181 * GTT cache may not work with big pages, so if those
8182 * are ever enabled GTT cache may need to be disabled.
8183 */
8184 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
8186 /* WaKVMNotificationOnConfigChange:bdw */
8187 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
8188 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
8190 lpt_init_clock_gating(dev_priv);
8192 /* WaDisableDopClockGating:bdw
8193 *
8194 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
8195 * clock gating.
8196 */
8197 I915_WRITE(GEN6_UCGCTL1,
8198 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}

static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * WaGttCachingOffByDefault:chv
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

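/*
 * As a sketch, wiring up a hypothetical new platform would add one more
 * branch above, before the MISSING_CASE() fallback:
 *
 *	else if (IS_NEWPLATFORM(dev_priv))
 *		dev_priv->display.init_clock_gating =
 *			newplatform_init_clock_gating;
 *
 * (IS_NEWPLATFORM and newplatform_init_clock_gating are placeholders for
 * illustration, not real symbols.)
 */
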
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

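/*
 * Usage sketch: reading a PCODE mailbox while holding rps.hw_lock. The
 * mailbox ID below is purely illustrative; note that @val is both input
 * (written to GEN6_PCODE_DATA) and output.
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val) == 0)
 *		DRM_DEBUG_DRIVER("OC params: 0x%08x\n", val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */
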
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}

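/*
 * Usage sketch: the SKL CDCLK code uses this helper to poll PCODE until it
 * is ready for a frequency change, roughly:
 *
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *
 * i.e. resend PREPARE_FOR_CHANGE until the READY_FOR_CHANGE bits are set in
 * the reply, with a 3 ms base timeout.
 */
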
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

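/*
 * Worked example (illustrative values, not taken from real hardware): with
 * rps.gpll_ref_freq = 25000 kHz, a VLV opcode of 0xc0 gives
 * N = 0xc0 - 0xb7 = 9, so byt_gpu_freq() returns 25000 * 9 / 1000 = 225 MHz,
 * and byt_freq_opcode(225) maps back to 0xc0. On CHV the rounding to even
 * opcodes in chv_freq_opcode() keeps the mapping round-trip safe for the
 * even values the hardware accepts.
 */
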
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

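/*
 * On most big-core platforms the RP opcode unit is 50 MHz
 * (GT_FREQUENCY_MULTIPLIER), while GEN9 uses 50/3 MHz steps: e.g. an opcode
 * of 18 on GEN9 corresponds to 18 * 50 / 3 = 300 MHz, and
 * intel_freq_opcode(dev_priv, 300) maps back to 18.
 */
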
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/* The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);

	/* vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);
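
	/*
	 * Reconstruction note (layout implied by the shift in the return
	 * below): the high-range window exposes bits 39:8 of the 40 bit
	 * counter and the low-range window bits 31:0, so the two reads
	 * overlap in bits 31:8. The retry loop above ensures both windows
	 * were sampled from a consistent counter value, which is what makes
	 * OR-ing them together valid.
	 */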
	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * for now.
	 */

	spin_unlock_irq(&dev_priv->uncore.lock);

	return lower | (u64)upper << 8;
}

u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, units, div;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1000;
		div = dev_priv->czclk_freq;

		time_hw = vlv_residency_raw(dev_priv, reg);
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1000;
		div = 1200;		/* 833.33ns */

		time_hw = I915_READ(reg);
	} else {
		units = 128000; /* 1.28us */
		div = 100000;

		time_hw = I915_READ(reg);
	}

	intel_runtime_pm_put(dev_priv);
	return DIV_ROUND_UP_ULL(time_hw * units, div);
}
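
/*
 * Worked example of the conversion above: on a GEN9 LP part one counter
 * tick is 833.33ns (units = 1000, div = 1200), so a raw reading of
 * 1,200,000 ticks converts to 1,200,000 * 1000 / 1200 = 1,000,000 us,
 * i.e. one second of RC6 residency.
 */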