/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
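/*
 * Illustrative use of the flags above: a platform that can safely use
 * RC6 and deep RC6 but not RC6pp would be described by the mask
 * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE.
 */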
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
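/*
 * Each row of the table below fills in a struct cxsr_latency; the
 * columns are, in order: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable.
 * The last four are latencies in ns, consumed by pineview_update_wm().
 */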
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
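/*
 * For example, FW_WM(wm, SR) expands to
 * (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK): the value is shifted into
 * the SR field of the DSPFW register and clamped to that field's mask.
 */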
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	dev_priv->wm.vlv.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
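/*
 * Illustrative decode: the 9-bit FIFO start point is the 8 bits of
 * dsparb at lo_shift, with the single dsparb2 bit at hi_shift supplying
 * bit 8. E.g. VLV_FIFO_START(dsparb, dsparb2, 8, 4) combines DSPARB
 * bits 15:8 with DSPARB2 bit 4.
 */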
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
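/*
 * Worked example for intel_calculate_wm() above (illustrative numbers):
 * clock_in_khz = 108000, cpp = 4, latency_ns = 5000 gives
 * (108000 / 1000) * 4 * 5000 / 1000 = 2160 bytes, i.e. 34 entries with
 * a 64 byte cacheline. With a 96 entry FIFO and a guard of 2 the
 * watermark would be 96 - (34 + 2) = 60.
 */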
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, plane_width, cursor_width, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	plane_width = crtc->config->pipe_src_w;
	cursor_width = crtc->base.cursor->state->crtc_w;
	cpp = fb->format->cpp[0];

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - plane_width * cpp * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * cursor_width * 4;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - cursor_width * 4 * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * 4 * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
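/*
 * Illustrative example for vlv_wm_method2() above: pixel_rate = 200000,
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4, latency = 30
 * (i.e. 3 usec): 30 * 200000 / (2200 * 10000) = 0 whole lines, so
 * (0 + 1) * 1920 * 4 = 7680 bytes, i.e. 120 64-byte FIFO entries.
 */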
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
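/*
 * Illustrative split for vlv_compute_fifo() above: with PM2 raw
 * watermarks of 60 (primary) and 30 (sprite0), sprite1 disabled and no
 * sprite0 workaround, total_rate = 90, so the primary gets
 * 511 * 60 / 90 = 340 entries, sprite0 gets 511 * 30 / 90 = 170, and
 * the single left-over entry is spread back over the active planes.
 */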
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
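/*
 * Example for vlv_invert_wm_value() above: a raw watermark of 100
 * entries in a 200 entry FIFO is programmed as 200 - 100 = 100, while a
 * raw value larger than the FIFO maps to USHRT_MAX (invalid).
 */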
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			to_intel_crtc_state(crtc->base.state);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&dev_priv->uncore.lock);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ_FW(DSPARB3);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB3, dsparb3);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	POSTING_READ_FW(DSPARB);

	spin_unlock(&dev_priv->uncore.lock);
}

#undef VLV_FIFO
static int vlv_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &crtc->wm.active.vlv;
	int level;

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

	/*
	 * If our intermediate WMs are identical to the final WMs, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}
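/*
 * The two helpers above detect a threshold crossing in either
 * direction, letting vlv_program_watermarks() below disable a feature
 * before reprogramming the watermarks and re-enable it afterwards.
 */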
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			4 * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
1950 /* latency must be in 0.1us units. */
1951 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
1955 if (WARN(latency == 0, "Latency value missing\n"))
1956 return UINT_MAX;
1958 ret = (uint64_t) pixel_rate * cpp * latency;
1959 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1961 return ret;
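/*
 * Worked example for method1 (hypothetical values): with
 * pixel_rate = 148500 kHz, cpp = 4 and a latency of 7 (0.7us),
 * DIV_ROUND_UP_ULL(148500 * 4 * 7, 64 * 10000) + 2 = 7 + 2 = 9 blocks.
 */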
1964 /* latency must be in 0.1us units. */
1965 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
1966 uint32_t horiz_pixels, uint8_t cpp,
1967 uint32_t latency)
1971 if (WARN(latency == 0, "Latency value missing\n"))
1972 return UINT_MAX;
1973 if (WARN_ON(!pipe_htotal))
1974 return UINT_MAX;
1976 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1977 ret = (ret + 1) * horiz_pixels * cpp;
1978 ret = DIV_ROUND_UP(ret, 64) + 2;
1980 return ret;
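/*
 * Worked example for method2 (same hypothetical mode): latency = 7
 * (0.7us), pixel_rate = 148500 kHz, pipe_htotal = 2200,
 * horiz_pixels = 1920, cpp = 4: the line count (7 * 148500) /
 * (2200 * 10000) truncates to 0, so the result is
 * DIV_ROUND_UP((0 + 1) * 1920 * 4, 64) + 2 = 120 + 2 = 122 blocks.
 */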
1982 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1983 uint8_t cpp)
1986 * Neither of these should be possible since this function shouldn't be
1987 * called if the CRTC is off or the plane is invisible. But let's be
1988 * extra paranoid to avoid a potential divide-by-zero if we screw up
1989 * elsewhere in the driver.
1993 if (WARN_ON(!horiz_pixels))
1994 return 0;
1996 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
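/*
 * Worked example (hypothetical values): pri_val = 122,
 * horiz_pixels = 1920 and cpp = 4 give
 * DIV_ROUND_UP(122 * 64, 1920 * 4) + 2 = 2 + 2 = 4 FBC lines.
 */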
1999 struct ilk_wm_maximums {
2000 uint16_t pri;
2001 uint16_t spr;
2002 uint16_t cur;
2003 uint16_t fbc;
2004 };
2007 * For both WM_PIPE and WM_LP.
2008 * mem_value must be in 0.1us units.
2010 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
2011 const struct intel_plane_state *pstate,
2012 uint32_t mem_value,
2013 bool is_lp)
2015 uint32_t method1, method2;
2016 int cpp;
2018 if (!intel_wm_plane_visible(cstate, pstate))
2019 return 0;
2021 cpp = pstate->base.fb->format->cpp[0];
2023 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2025 if (!is_lp)
2026 return method1;
2028 method2 = ilk_wm_method2(cstate->pixel_rate,
2029 cstate->base.adjusted_mode.crtc_htotal,
2030 drm_rect_width(&pstate->base.dst),
2031 cpp, mem_value);
2033 return min(method1, method2);
2037 * For both WM_PIPE and WM_LP.
2038 * mem_value must be in 0.1us units.
2040 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
2041 const struct intel_plane_state *pstate,
2042 uint32_t mem_value)
2044 uint32_t method1, method2;
2045 int cpp;
2047 if (!intel_wm_plane_visible(cstate, pstate))
2048 return 0;
2050 cpp = pstate->base.fb->format->cpp[0];
2052 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
2053 method2 = ilk_wm_method2(cstate->pixel_rate,
2054 cstate->base.adjusted_mode.crtc_htotal,
2055 drm_rect_width(&pstate->base.dst),
2056 cpp, mem_value);
2057 return min(method1, method2);
2061 * For both WM_PIPE and WM_LP.
2062 * mem_value must be in 0.1us units.
2064 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2065 const struct intel_plane_state *pstate,
2066 uint32_t mem_value)
2070 if (!intel_wm_plane_visible(cstate, pstate))
2071 return 0;
2073 cpp = pstate->base.fb->format->cpp[0];
2075 return ilk_wm_method2(cstate->pixel_rate,
2076 cstate->base.adjusted_mode.crtc_htotal,
2077 pstate->base.crtc_w, cpp, mem_value);
2080 /* Only for WM_LP. */
2081 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2082 const struct intel_plane_state *pstate,
2083 uint32_t pri_val)
2087 if (!intel_wm_plane_visible(cstate, pstate))
2088 return 0;
2090 cpp = pstate->base.fb->format->cpp[0];
2092 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
2096 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2098 if (INTEL_GEN(dev_priv) >= 8)
2099 return 3072;
2100 else if (INTEL_GEN(dev_priv) >= 7)
2101 return 768;
2102 else
2103 return 512;
2107 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2108 int level, bool is_sprite)
2110 if (INTEL_GEN(dev_priv) >= 8)
2111 /* BDW primary/sprite plane watermarks */
2112 return level == 0 ? 255 : 2047;
2113 else if (INTEL_GEN(dev_priv) >= 7)
2114 /* IVB/HSW primary/sprite plane watermarks */
2115 return level == 0 ? 127 : 1023;
2116 else if (!is_sprite)
2117 /* ILK/SNB primary plane watermarks */
2118 return level == 0 ? 127 : 511;
2120 /* ILK/SNB sprite plane watermarks */
2121 return level == 0 ? 63 : 255;
2125 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2127 if (INTEL_GEN(dev_priv) >= 7)
2128 return level == 0 ? 63 : 255;
2130 return level == 0 ? 31 : 63;
2133 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2135 if (INTEL_GEN(dev_priv) >= 8)
2136 return 31;
2137 else
2138 return 15;
2141 /* Calculate the maximum primary/sprite plane watermark */
2142 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2143 int level,
2144 const struct intel_wm_config *config,
2145 enum intel_ddb_partitioning ddb_partitioning,
2146 bool is_sprite)
2148 struct drm_i915_private *dev_priv = to_i915(dev);
2149 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2151 /* if sprites aren't enabled, sprites get nothing */
2152 if (is_sprite && !config->sprites_enabled)
2153 return 0;
2155 /* HSW allows LP1+ watermarks even with multiple pipes */
2156 if (level == 0 || config->num_pipes_active > 1) {
2157 fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
2160 * For some reason the non self refresh
2161 * FIFO size is only half of the self
2162 * refresh FIFO size on ILK/SNB.
2164 if (INTEL_GEN(dev_priv) <= 6)
2165 fifo_size /= 2;
2168 if (config->sprites_enabled) {
2169 /* level 0 is always calculated with 1:1 split */
2170 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2171 if (is_sprite)
2172 fifo_size *= 5;
2173 fifo_size /= 6;
2174 } else {
2175 fifo_size /= 2;
2179 /* clamp to max that the registers can hold */
2180 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2183 /* Calculate the maximum cursor plane watermark */
2184 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2185 int level,
2186 const struct intel_wm_config *config)
2188 /* HSW LP1+ watermarks w/ multiple pipes */
2189 if (level > 0 && config->num_pipes_active > 1)
2190 return 64;
2192 /* otherwise just report max that registers can hold */
2193 return ilk_cursor_wm_reg_max(to_i915(dev), level);
2196 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2197 int level,
2198 const struct intel_wm_config *config,
2199 enum intel_ddb_partitioning ddb_partitioning,
2200 struct ilk_wm_maximums *max)
2202 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2203 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2204 max->cur = ilk_cursor_wm_max(dev, level, config);
2205 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
2208 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2209 int level,
2210 struct ilk_wm_maximums *max)
2212 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2213 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2214 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2215 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2218 static bool ilk_validate_wm_level(int level,
2219 const struct ilk_wm_maximums *max,
2220 struct intel_wm_level *result)
2224 /* already determined to be invalid? */
2225 if (!result->enable)
2226 return false;
2228 result->enable = result->pri_val <= max->pri &&
2229 result->spr_val <= max->spr &&
2230 result->cur_val <= max->cur;
2232 ret = result->enable;
2235 * HACK until we can pre-compute everything,
2236 * and thus fail gracefully if LP0 watermarks
2237 * are exceeded!
2238 */
2239 if (level == 0 && !result->enable) {
2240 if (result->pri_val > max->pri)
2241 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2242 level, result->pri_val, max->pri);
2243 if (result->spr_val > max->spr)
2244 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2245 level, result->spr_val, max->spr);
2246 if (result->cur_val > max->cur)
2247 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2248 level, result->cur_val, max->cur);
2250 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2251 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2252 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2253 result->enable = true;
2256 return ret;
2259 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2260 const struct intel_crtc *intel_crtc,
2261 int level,
2262 struct intel_crtc_state *cstate,
2263 struct intel_plane_state *pristate,
2264 struct intel_plane_state *sprstate,
2265 struct intel_plane_state *curstate,
2266 struct intel_wm_level *result)
2268 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2269 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2270 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2272 /* WM1+ latency values stored in 0.5us units */
2273 if (level > 0) {
2274 pri_latency *= 5;
2275 spr_latency *= 5;
2276 cur_latency *= 5;
2277 }
2279 if (pristate) {
2280 result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2281 pri_latency, level);
2282 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2285 if (sprstate)
2286 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2288 if (curstate)
2289 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2291 result->enable = true;
2295 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2297 const struct intel_atomic_state *intel_state =
2298 to_intel_atomic_state(cstate->base.state);
2299 const struct drm_display_mode *adjusted_mode =
2300 &cstate->base.adjusted_mode;
2301 u32 linetime, ips_linetime;
2303 if (!cstate->base.active)
2304 return 0;
2305 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2306 return 0;
2307 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
2308 return 0;
2310 /* The watermarks are computed based on how long it takes to fill a single
2311 * row at the given clock rate, multiplied by 8.
2313 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2314 adjusted_mode->crtc_clock);
2315 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2316 intel_state->cdclk.logical.cdclk);
2318 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2319 PIPE_WM_LINETIME_TIME(linetime);
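/*
 * Worked example (hypothetical 1920x1080@60 timings): crtc_htotal = 2200
 * and crtc_clock = 148500 kHz give
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 (in 1/8th
 * us units); with a hypothetical 450000 kHz logical cdclk,
 * ips_linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 450000) = 39.
 */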
2322 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2323 uint16_t wm[8])
2325 if (IS_GEN9(dev_priv)) {
2326 uint32_t val;
2327 int ret, i;
2328 int level, max_level = ilk_wm_max_level(dev_priv);
2330 /* read the first set of memory latencies[0:3] */
2331 val = 0; /* data0 to be programmed to 0 for first set */
2332 mutex_lock(&dev_priv->rps.hw_lock);
2333 ret = sandybridge_pcode_read(dev_priv,
2334 GEN9_PCODE_READ_MEM_LATENCY,
2335 &val);
2336 mutex_unlock(&dev_priv->rps.hw_lock);
2338 if (ret) {
2339 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2340 return;
2343 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2344 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2345 GEN9_MEM_LATENCY_LEVEL_MASK;
2346 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2347 GEN9_MEM_LATENCY_LEVEL_MASK;
2348 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2349 GEN9_MEM_LATENCY_LEVEL_MASK;
2351 /* read the second set of memory latencies[4:7] */
2352 val = 1; /* data0 to be programmed to 1 for second set */
2353 mutex_lock(&dev_priv->rps.hw_lock);
2354 ret = sandybridge_pcode_read(dev_priv,
2355 GEN9_PCODE_READ_MEM_LATENCY,
2356 &val);
2357 mutex_unlock(&dev_priv->rps.hw_lock);
2358 if (ret) {
2359 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2360 return;
2363 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2364 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2365 GEN9_MEM_LATENCY_LEVEL_MASK;
2366 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2367 GEN9_MEM_LATENCY_LEVEL_MASK;
2368 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2369 GEN9_MEM_LATENCY_LEVEL_MASK;
2372 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2373 * need to be disabled. We make sure to sanitize the values out
2374 * of the punit to satisfy this requirement.
2376 for (level = 1; level <= max_level; level++) {
2377 if (wm[level] == 0) {
2378 for (i = level + 1; i <= max_level; i++)
2379 wm[i] = 0;
2380 break;
2385 * WaWmMemoryReadLatency:skl,glk
2387 * punit doesn't take into account the read latency so we need
2388 * to add 2us to the various latency levels we retrieve from the
2389 punit when level 0 response data is 0us.
2390 */
2391 if (wm[0] == 0) {
2392 wm[0] += 2;
2393 for (level = 1; level <= max_level; level++) {
2394 if (wm[level] == 0)
2395 break;
2396 wm[level] += 2;
2400 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2401 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2403 wm[0] = (sskpd >> 56) & 0xFF;
2404 if (wm[0] == 0)
2405 wm[0] = sskpd & 0xF;
2406 wm[1] = (sskpd >> 4) & 0xFF;
2407 wm[2] = (sskpd >> 12) & 0xFF;
2408 wm[3] = (sskpd >> 20) & 0x1FF;
2409 wm[4] = (sskpd >> 32) & 0x1FF;
2410 } else if (INTEL_GEN(dev_priv) >= 6) {
2411 uint32_t sskpd = I915_READ(MCH_SSKPD);
2413 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2414 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2415 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2416 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2417 } else if (INTEL_GEN(dev_priv) >= 5) {
2418 uint32_t mltr = I915_READ(MLTR_ILK);
2420 /* ILK primary LP0 latency is 700 ns */
2421 wm[0] = 7;
2422 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2423 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2427 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2428 uint16_t wm[5])
2430 /* ILK sprite LP0 latency is 1300 ns */
2431 if (IS_GEN5(dev_priv))
2432 wm[0] = 13;
2435 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2436 uint16_t wm[5])
2438 /* ILK cursor LP0 latency is 1300 ns */
2439 if (IS_GEN5(dev_priv))
2440 wm[0] = 13;
2442 /* WaDoubleCursorLP3Latency:ivb */
2443 if (IS_IVYBRIDGE(dev_priv))
2444 wm[3] *= 2;
2447 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2449 /* how many WM levels are we expecting */
2450 if (INTEL_GEN(dev_priv) >= 9)
2451 return 7;
2452 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2453 return 4;
2454 else if (INTEL_GEN(dev_priv) >= 6)
2455 return 3;
2456 else
2457 return 2;
2460 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2461 const char *name,
2462 const uint16_t wm[8])
2464 int level, max_level = ilk_wm_max_level(dev_priv);
2466 for (level = 0; level <= max_level; level++) {
2467 unsigned int latency = wm[level];
2469 if (latency == 0) {
2470 DRM_ERROR("%s WM%d latency not provided\n",
2471 name, level);
2472 continue;
2476 * - latencies are in us on gen9.
2477 * - before then, WM1+ latency values are in 0.5us units
2479 if (IS_GEN9(dev_priv))
2480 latency *= 10;
2481 else if (level > 0)
2482 latency *= 5;
2484 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2485 name, level, wm[level],
2486 latency / 10, latency % 10);
2490 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2491 uint16_t wm[5], uint16_t min)
2493 int level, max_level = ilk_wm_max_level(dev_priv);
2495 if (wm[0] >= min)
2496 return false;
2498 wm[0] = max(wm[0], min);
2499 for (level = 1; level <= max_level; level++)
2500 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2502 return true;
2505 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2510 * The BIOS provided WM memory latency values are often
2511 * inadequate for high resolution displays. Adjust them.
2513 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2514 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2515 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2520 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2521 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2522 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2523 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2526 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2528 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
2530 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2531 sizeof(dev_priv->wm.pri_latency));
2532 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2533 sizeof(dev_priv->wm.pri_latency));
2535 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
2536 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
2538 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2539 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2540 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2542 if (IS_GEN6(dev_priv))
2543 snb_wm_latency_quirk(dev_priv);
2546 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
2548 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
2549 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
2552 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2553 struct intel_pipe_wm *pipe_wm)
2555 /* LP0 watermark maximums depend on this pipe alone */
2556 const struct intel_wm_config config = {
2557 .num_pipes_active = 1,
2558 .sprites_enabled = pipe_wm->sprites_enabled,
2559 .sprites_scaled = pipe_wm->sprites_scaled,
2560 };
2561 struct ilk_wm_maximums max;
2563 /* LP0 watermarks always use 1/2 DDB partitioning */
2564 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2566 /* At least LP0 must be valid */
2567 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2568 DRM_DEBUG_KMS("LP0 watermark invalid\n");
2575 /* Compute new watermarks for the pipe */
2576 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2578 struct drm_atomic_state *state = cstate->base.state;
2579 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2580 struct intel_pipe_wm *pipe_wm;
2581 struct drm_device *dev = state->dev;
2582 const struct drm_i915_private *dev_priv = to_i915(dev);
2583 struct intel_plane *intel_plane;
2584 struct intel_plane_state *pristate = NULL;
2585 struct intel_plane_state *sprstate = NULL;
2586 struct intel_plane_state *curstate = NULL;
2587 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
2588 struct ilk_wm_maximums max;
2590 pipe_wm = &cstate->wm.ilk.optimal;
2592 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2593 struct intel_plane_state *ps;
2595 ps = intel_atomic_get_existing_plane_state(state,
2596 intel_plane);
2597 if (!ps)
2598 continue;
2600 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2601 pristate = ps;
2602 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2603 sprstate = ps;
2604 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2605 curstate = ps;
2608 pipe_wm->pipe_enabled = cstate->base.active;
2609 if (sprstate) {
2610 pipe_wm->sprites_enabled = sprstate->base.visible;
2611 pipe_wm->sprites_scaled = sprstate->base.visible &&
2612 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
2613 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
2616 usable_level = max_level;
2618 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2619 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
2620 usable_level = 1;
2622 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2623 if (pipe_wm->sprites_scaled)
2624 usable_level = 0;
2626 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2627 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2629 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2630 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2632 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2633 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2635 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2636 return -EINVAL;
2638 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
2640 for (level = 1; level <= max_level; level++) {
2641 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2643 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2644 pristate, sprstate, curstate, wm);
2647 * Disable any watermark level that exceeds the
2648 * register maximums since such watermarks are
2649 * always invalid.
2650 */
2651 if (level > usable_level)
2652 break;
2654 if (ilk_validate_wm_level(level, &max, wm))
2655 pipe_wm->wm[level] = *wm;
2656 else
2657 usable_level = level;
2660 return 0;
2664 * Build a set of 'intermediate' watermark values that satisfy both the old
2665 * state and the new state. These can be programmed to the hardware
2668 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2669 struct intel_crtc *intel_crtc,
2670 struct intel_crtc_state *newstate)
2672 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2673 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2674 int level, max_level = ilk_wm_max_level(to_i915(dev));
2677 * Start with the final, target watermarks, then combine with the
2678 * currently active watermarks to get values that are safe both before
2679 * and after the vblank.
2681 *a = newstate->wm.ilk.optimal;
2682 a->pipe_enabled |= b->pipe_enabled;
2683 a->sprites_enabled |= b->sprites_enabled;
2684 a->sprites_scaled |= b->sprites_scaled;
2686 for (level = 0; level <= max_level; level++) {
2687 struct intel_wm_level *a_wm = &a->wm[level];
2688 const struct intel_wm_level *b_wm = &b->wm[level];
2690 a_wm->enable &= b_wm->enable;
2691 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2692 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2693 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2694 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2698 * We need to make sure that these merged watermark values are
2699 * actually a valid configuration themselves. If they're not,
2700 * there's no safe way to transition from the old state to
2701 * the new state, so we need to fail the atomic transaction.
2703 if (!ilk_validate_pipe_wm(dev, a))
2704 return -EINVAL;
2707 * If our intermediate WM are identical to the final WM, then we can
2708 * omit the post-vblank programming; only update if it's different.
2710 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
2711 newstate->wm.need_postvbl_update = true;
2713 return 0;
2717 * Merge the watermarks from all active pipes for a specific level.
2719 static void ilk_merge_wm_level(struct drm_device *dev,
2720 int level,
2721 struct intel_wm_level *ret_wm)
2723 const struct intel_crtc *intel_crtc;
2725 ret_wm->enable = true;
2727 for_each_intel_crtc(dev, intel_crtc) {
2728 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2729 const struct intel_wm_level *wm = &active->wm[level];
2731 if (!active->pipe_enabled)
2732 continue;
2735 * The watermark values may have been used in the past,
2736 * so we must maintain them in the registers for some
2737 * time even if the level is now disabled.
2739 if (!wm->enable)
2740 ret_wm->enable = false;
2742 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2743 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2744 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2745 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2750 * Merge all low power watermarks for all active pipes.
2752 static void ilk_wm_merge(struct drm_device *dev,
2753 const struct intel_wm_config *config,
2754 const struct ilk_wm_maximums *max,
2755 struct intel_pipe_wm *merged)
2757 struct drm_i915_private *dev_priv = to_i915(dev);
2758 int level, max_level = ilk_wm_max_level(dev_priv);
2759 int last_enabled_level = max_level;
2761 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2762 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
2763 config->num_pipes_active > 1)
2764 last_enabled_level = 0;
2766 /* ILK: FBC WM must be disabled always */
2767 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
2769 /* merge each WM1+ level */
2770 for (level = 1; level <= max_level; level++) {
2771 struct intel_wm_level *wm = &merged->wm[level];
2773 ilk_merge_wm_level(dev, level, wm);
2775 if (level > last_enabled_level)
2776 wm->enable = false;
2777 else if (!ilk_validate_wm_level(level, max, wm))
2778 /* make sure all following levels get disabled */
2779 last_enabled_level = level - 1;
2782 * The spec says it is preferred to disable
2783 * FBC WMs instead of disabling a WM level.
2785 if (wm->fbc_val > max->fbc) {
2786 if (wm->enable)
2787 merged->fbc_wm_enabled = false;
2788 wm->fbc_val = 0;
2792 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2794 * FIXME this is racy. FBC might get enabled later.
2795 * What we should check here is whether FBC can be
2796 * enabled sometime later.
2798 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
2799 intel_fbc_is_active(dev_priv)) {
2800 for (level = 2; level <= max_level; level++) {
2801 struct intel_wm_level *wm = &merged->wm[level];
2803 wm->enable = false;
2808 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2810 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2811 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
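/*
 * Example of the mapping above: when wm[4] is valid (5 levels, e.g.
 * HSW/BDW) LP1/LP2/LP3 map to levels 1/3/4; with only four levels they
 * map to 1/2/3.
 */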
2814 /* The value we need to program into the WM_LPx latency field */
2815 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2817 struct drm_i915_private *dev_priv = to_i915(dev);
2819 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2820 return 2 * level;
2821 else
2822 return dev_priv->wm.pri_latency[level];
2825 static void ilk_compute_wm_results(struct drm_device *dev,
2826 const struct intel_pipe_wm *merged,
2827 enum intel_ddb_partitioning partitioning,
2828 struct ilk_wm_values *results)
2830 struct drm_i915_private *dev_priv = to_i915(dev);
2831 struct intel_crtc *intel_crtc;
2834 results->enable_fbc_wm = merged->fbc_wm_enabled;
2835 results->partitioning = partitioning;
2837 /* LP1+ register values */
2838 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2839 const struct intel_wm_level *r;
2841 level = ilk_wm_lp_to_level(wm_lp, merged);
2843 r = &merged->wm[level];
2846 * Maintain the watermark values even if the level is
2847 * disabled. Doing otherwise could cause underruns.
2849 results->wm_lp[wm_lp - 1] =
2850 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2851 (r->pri_val << WM1_LP_SR_SHIFT) |
2852 r->cur_val;
2854 if (r->enable)
2855 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2857 if (INTEL_GEN(dev_priv) >= 8)
2858 results->wm_lp[wm_lp - 1] |=
2859 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2860 else
2861 results->wm_lp[wm_lp - 1] |=
2862 r->fbc_val << WM1_LP_FBC_SHIFT;
2865 * Always set WM1S_LP_EN when spr_val != 0, even if the
2866 * level is disabled. Doing otherwise could cause underruns.
2868 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
2869 WARN_ON(wm_lp != 1);
2870 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2871 } else
2872 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2875 /* LP0 register values */
2876 for_each_intel_crtc(dev, intel_crtc) {
2877 enum pipe pipe = intel_crtc->pipe;
2878 const struct intel_wm_level *r =
2879 &intel_crtc->wm.active.ilk.wm[0];
2881 if (WARN_ON(!r->enable))
2882 continue;
2884 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2886 results->wm_pipe[pipe] =
2887 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2888 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2889 r->cur_val;
2893 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2894 * case both are at the same level. Prefer r1 in case they're the same. */
2895 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2896 struct intel_pipe_wm *r1,
2897 struct intel_pipe_wm *r2)
2899 int level, max_level = ilk_wm_max_level(to_i915(dev));
2900 int level1 = 0, level2 = 0;
2902 for (level = 1; level <= max_level; level++) {
2903 if (r1->wm[level].enable)
2904 level1 = level;
2905 if (r2->wm[level].enable)
2906 level2 = level;
2909 if (level1 == level2) {
2910 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2911 return r2;
2912 else
2913 return r1;
2914 } else if (level1 > level2) {
2915 return r1;
2916 } else {
2917 return r2;
2921 /* dirty bits used to track which watermarks need changes */
2922 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2923 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2924 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2925 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2926 #define WM_DIRTY_FBC (1 << 24)
2927 #define WM_DIRTY_DDB (1 << 25)
2929 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2930 const struct ilk_wm_values *old,
2931 const struct ilk_wm_values *new)
2933 unsigned int dirty = 0;
2937 for_each_pipe(dev_priv, pipe) {
2938 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2939 dirty |= WM_DIRTY_LINETIME(pipe);
2940 /* Must disable LP1+ watermarks too */
2941 dirty |= WM_DIRTY_LP_ALL;
2944 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2945 dirty |= WM_DIRTY_PIPE(pipe);
2946 /* Must disable LP1+ watermarks too */
2947 dirty |= WM_DIRTY_LP_ALL;
2951 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2952 dirty |= WM_DIRTY_FBC;
2953 /* Must disable LP1+ watermarks too */
2954 dirty |= WM_DIRTY_LP_ALL;
2957 if (old->partitioning != new->partitioning) {
2958 dirty |= WM_DIRTY_DDB;
2959 /* Must disable LP1+ watermarks too */
2960 dirty |= WM_DIRTY_LP_ALL;
2963 /* LP1+ watermarks already deemed dirty, no need to continue */
2964 if (dirty & WM_DIRTY_LP_ALL)
2967 /* Find the lowest numbered LP1+ watermark in need of an update... */
2968 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2969 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2970 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2974 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2975 for (; wm_lp <= 3; wm_lp++)
2976 dirty |= WM_DIRTY_LP(wm_lp);
2981 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2984 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2985 bool changed = false;
2987 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2988 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2989 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2990 changed = true;
2992 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2993 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2994 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2995 changed = true;
2997 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2998 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2999 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
3000 changed = true;
3004 * Don't touch WM1S_LP_EN here.
3005 * Doing so could cause underruns.
3006 */
3008 return changed;
3012 * The spec says we shouldn't write when we don't need to, because every write
3013 * causes WMs to be re-evaluated, expending some power.
3015 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3016 struct ilk_wm_values *results)
3018 struct ilk_wm_values *previous = &dev_priv->wm.hw;
3019 unsigned int dirty;
3020 uint32_t val;
3022 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3023 if (!dirty)
3024 return;
3026 _ilk_disable_lp_wm(dev_priv, dirty);
3028 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3029 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
3030 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3031 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
3032 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3033 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
3035 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
3036 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
3037 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
3038 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
3039 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
3040 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
3042 if (dirty & WM_DIRTY_DDB) {
3043 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3044 val = I915_READ(WM_MISC);
3045 if (results->partitioning == INTEL_DDB_PART_1_2)
3046 val &= ~WM_MISC_DATA_PARTITION_5_6;
3048 val |= WM_MISC_DATA_PARTITION_5_6;
3049 I915_WRITE(WM_MISC, val);
3051 val = I915_READ(DISP_ARB_CTL2);
3052 if (results->partitioning == INTEL_DDB_PART_1_2)
3053 val &= ~DISP_DATA_PARTITION_5_6;
3055 val |= DISP_DATA_PARTITION_5_6;
3056 I915_WRITE(DISP_ARB_CTL2, val);
3060 if (dirty & WM_DIRTY_FBC) {
3061 val = I915_READ(DISP_ARB_CTL);
3062 if (results->enable_fbc_wm)
3063 val &= ~DISP_FBC_WM_DIS;
3065 val |= DISP_FBC_WM_DIS;
3066 I915_WRITE(DISP_ARB_CTL, val);
3069 if (dirty & WM_DIRTY_LP(1) &&
3070 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3071 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
3073 if (INTEL_GEN(dev_priv) >= 7) {
3074 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3075 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
3076 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3077 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
3080 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3081 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
3082 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3083 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
3084 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3085 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
3087 dev_priv->wm.hw = *results;
3090 bool ilk_disable_lp_wm(struct drm_device *dev)
3092 struct drm_i915_private *dev_priv = to_i915(dev);
3094 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3097 #define SKL_SAGV_BLOCK_TIME 30 /* µs */
3100 * FIXME: We still don't have the proper code to detect whether we need to apply the WA,
3101 * so assume we'll always need it in order to avoid underruns.
3103 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
3105 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3107 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
3108 return true;
3110 return false;
3114 intel_has_sagv(struct drm_i915_private *dev_priv)
3116 if (IS_KABYLAKE(dev_priv))
3117 return true;
3119 if (IS_SKYLAKE(dev_priv) &&
3120 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
3121 return true;
3123 return false;
3127 * SAGV dynamically adjusts the system agent voltage and clock frequencies
3128 * depending on power and performance requirements. The display engine access
3129 * to system memory is blocked during the adjustment time. Because of the
3130 * blocking time, having this enabled can cause full system hangs and/or pipe
3131 * underruns if we don't meet all of the following requirements:
3133 * - <= 1 pipe enabled
3134 * - All planes can enable watermarks for latencies >= SAGV engine block time
3135 * - We're not using an interlaced display configuration
3138 intel_enable_sagv(struct drm_i915_private *dev_priv)
3142 if (!intel_has_sagv(dev_priv))
3143 return 0;
3145 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
3146 return 0;
3148 DRM_DEBUG_KMS("Enabling the SAGV\n");
3149 mutex_lock(&dev_priv->rps.hw_lock);
3151 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3152 GEN9_SAGV_ENABLE);
3154 /* We don't need to wait for the SAGV when enabling */
3155 mutex_unlock(&dev_priv->rps.hw_lock);
3158 * Some skl systems, pre-release machines in particular,
3159 * don't actually have an SAGV.
3161 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3162 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3163 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3164 return 0;
3165 } else if (ret < 0) {
3166 DRM_ERROR("Failed to enable the SAGV\n");
3167 return ret;
3170 dev_priv->sagv_status = I915_SAGV_ENABLED;
3171 return 0;
3175 intel_disable_sagv(struct drm_i915_private *dev_priv)
3179 if (!intel_has_sagv(dev_priv))
3180 return 0;
3182 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
3183 return 0;
3185 DRM_DEBUG_KMS("Disabling the SAGV\n");
3186 mutex_lock(&dev_priv->rps.hw_lock);
3188 /* bspec says to keep retrying for at least 1 ms */
3189 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
3190 GEN9_SAGV_DISABLE,
3191 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
3192 1);
3193 mutex_unlock(&dev_priv->rps.hw_lock);
3196 * Some skl systems, pre-release machines in particular,
3197 * don't actually have an SAGV.
3199 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
3200 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
3201 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
3202 return 0;
3203 } else if (ret < 0) {
3204 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
3205 return ret;
3208 dev_priv->sagv_status = I915_SAGV_DISABLED;
3209 return 0;
3212 bool intel_can_enable_sagv(struct drm_atomic_state *state)
3214 struct drm_device *dev = state->dev;
3215 struct drm_i915_private *dev_priv = to_i915(dev);
3216 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3217 struct intel_crtc *crtc;
3218 struct intel_plane *plane;
3219 struct intel_crtc_state *cstate;
3220 enum pipe pipe;
3221 int level, latency;
3223 if (!intel_has_sagv(dev_priv))
3224 return false;
3227 * SKL workaround: bspec recommends we disable the SAGV when we have
3228 * more than one pipe enabled
3230 * If there are no active CRTCs, no additional checks need be performed
3232 if (hweight32(intel_state->active_crtcs) == 0)
3233 return true;
3234 else if (hweight32(intel_state->active_crtcs) > 1)
3235 return false;
3237 /* Since we're now guaranteed to only have one active CRTC... */
3238 pipe = ffs(intel_state->active_crtcs) - 1;
3239 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3240 cstate = to_intel_crtc_state(crtc->base.state);
3242 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 return false;
3245 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3246 struct skl_plane_wm *wm =
3247 &cstate->wm.skl.optimal.planes[plane->id];
3249 /* Skip this plane if it's not enabled */
3250 if (!wm->wm[0].plane_en)
3251 continue;
3253 /* Find the highest enabled wm level for this plane */
3254 for (level = ilk_wm_max_level(dev_priv);
3255 !wm->wm[level].plane_en; --level)
3256 { }
3258 latency = dev_priv->wm.skl_latency[level];
3260 if (skl_needs_memory_bw_wa(intel_state) &&
3261 plane->base.state->fb->modifier ==
3262 I915_FORMAT_MOD_X_TILED)
3263 latency += 15;
3266 * If any of the planes on this pipe don't enable wm levels
3267 * that incur memory latencies higher than 30µs we can't enable
3268 * the SAGV.
3269 */
3270 if (latency < SKL_SAGV_BLOCK_TIME)
3271 return false;
3274 return true;
3278 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3279 const struct intel_crtc_state *cstate,
3280 struct skl_ddb_entry *alloc, /* out */
3281 int *num_active /* out */)
3283 struct drm_atomic_state *state = cstate->base.state;
3284 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3285 struct drm_i915_private *dev_priv = to_i915(dev);
3286 struct drm_crtc *for_crtc = cstate->base.crtc;
3287 unsigned int pipe_size, ddb_size;
3288 int nth_active_pipe;
3290 if (WARN_ON(!state) || !cstate->base.active) {
3291 alloc->start = 0;
3292 alloc->end = 0;
3293 *num_active = hweight32(dev_priv->active_crtcs);
3294 return;
3297 if (intel_state->active_pipe_changes)
3298 *num_active = hweight32(intel_state->active_crtcs);
3299 else
3300 *num_active = hweight32(dev_priv->active_crtcs);
3302 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3303 WARN_ON(ddb_size == 0);
3305 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3308 * If the state doesn't change the active CRTCs, then there's
3309 * no need to recalculate; the existing pipe allocation limits
3310 * should remain unchanged. Note that we're safe from racing
3311 * commits since any racing commit that changes the active CRTC
3312 * list would need to grab _all_ crtc locks, including the one
3313 * we currently hold.
3315 if (!intel_state->active_pipe_changes) {
3317 * alloc may be cleared by clear_intel_crtc_state,
3318 * copy from old state to be sure
3320 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
3321 return;
3324 nth_active_pipe = hweight32(intel_state->active_crtcs &
3325 (drm_crtc_mask(for_crtc) - 1));
3326 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3327 alloc->start = nth_active_pipe * ddb_size / *num_active;
3328 alloc->end = alloc->start + pipe_size;
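/*
 * Worked example (hypothetical numbers): with an 896-block DDB and the
 * 4 bypass blocks removed, ddb_size = 892. With two active pipes each
 * gets pipe_size = 892 / 2 = 446 blocks, and the second pipe's
 * allocation spans [446, 892).
 */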
3331 static unsigned int skl_cursor_allocation(int num_active)
3333 if (num_active == 1)
3334 return 32;
3336 return 8;
3339 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3341 entry->start = reg & 0x3ff;
3342 entry->end = (reg >> 16) & 0x3ff;
3343 if (entry->end)
3344 entry->end += 1;
3347 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3348 struct skl_ddb_allocation *ddb /* out */)
3350 struct intel_crtc *crtc;
3352 memset(ddb, 0, sizeof(*ddb));
3354 for_each_intel_crtc(&dev_priv->drm, crtc) {
3355 enum intel_display_power_domain power_domain;
3356 enum plane_id plane_id;
3357 enum pipe pipe = crtc->pipe;
3359 power_domain = POWER_DOMAIN_PIPE(pipe);
3360 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3361 continue;
3363 for_each_plane_id_on_crtc(crtc, plane_id) {
3366 if (plane_id != PLANE_CURSOR)
3367 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
3368 else
3369 val = I915_READ(CUR_BUF_CFG(pipe));
3371 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
3374 intel_display_power_put(dev_priv, power_domain);
3379 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3380 * The bspec defines downscale amount as:
3383 * Horizontal down scale amount = maximum[1, Horizontal source size /
3384 * Horizontal destination size]
3385 * Vertical down scale amount = maximum[1, Vertical source size /
3386 * Vertical destination size]
3387 * Total down scale amount = Horizontal down scale amount *
3388 * Vertical down scale amount
3391 * Return value is provided in 16.16 fixed point form to retain fractional part.
3392 * Caller should take care of dividing & rounding off the value.
3395 skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3396 const struct intel_plane_state *pstate)
3398 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
3399 uint32_t downscale_h, downscale_w;
3400 uint32_t src_w, src_h, dst_w, dst_h;
3402 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3403 return DRM_PLANE_HELPER_NO_SCALING;
3405 /* n.b., src is 16.16 fixed point, dst is whole integer */
3406 if (plane->id == PLANE_CURSOR) {
3407 src_w = pstate->base.src_w;
3408 src_h = pstate->base.src_h;
3409 dst_w = pstate->base.crtc_w;
3410 dst_h = pstate->base.crtc_h;
3412 src_w = drm_rect_width(&pstate->base.src);
3413 src_h = drm_rect_height(&pstate->base.src);
3414 dst_w = drm_rect_width(&pstate->base.dst);
3415 dst_h = drm_rect_height(&pstate->base.dst);
3418 if (drm_rotation_90_or_270(pstate->base.rotation))
3419 swap(dst_w, dst_h);
3421 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3422 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3424 /* Provide result in 16.16 fixed point */
3425 return (uint64_t)downscale_w * downscale_h >> 16;
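/*
 * Worked example: scaling a 3840x2160 source onto a 1920x1080 plane
 * gives downscale_w = downscale_h = 0x20000 (2.0 in 16.16 fixed point),
 * so the total downscale amount is (0x20000 * 0x20000) >> 16 = 0x40000,
 * i.e. 4.0 in 16.16 format.
 */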
3429 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3430 const struct drm_plane_state *pstate,
3431 int y)
3433 struct intel_plane *plane = to_intel_plane(pstate->plane);
3434 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3435 uint32_t down_scale_amount, data_rate;
3436 uint32_t width = 0, height = 0;
3437 struct drm_framebuffer *fb;
3438 u32 format;
3440 if (!intel_pstate->base.visible)
3441 return 0;
3443 fb = pstate->fb;
3444 format = fb->format->format;
3446 if (plane->id == PLANE_CURSOR)
3447 return 0;
3448 if (y && format != DRM_FORMAT_NV12)
3449 return 0;
3451 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3452 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3454 if (drm_rotation_90_or_270(pstate->rotation))
3455 swap(width, height);
3457 /* for planar format */
3458 if (format == DRM_FORMAT_NV12) {
3459 if (y) /* y-plane data rate */
3460 data_rate = width * height *
3461 fb->format->cpp[0];
3462 else /* uv-plane data rate */
3463 data_rate = (width / 2) * (height / 2) *
3464 fb->format->cpp[1];
3465 } else {
3466 /* for packed formats */
3467 data_rate = width * height * fb->format->cpp[0];
3470 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
3472 return (uint64_t)data_rate * down_scale_amount >> 16;
3476 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3477 * an 8192x4096@32bpp framebuffer:
3478 * 3 * 4096 * 8192 * 4 < 2^32
3481 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
3482 unsigned *plane_data_rate,
3483 unsigned *plane_y_data_rate)
3485 struct drm_crtc_state *cstate = &intel_cstate->base;
3486 struct drm_atomic_state *state = cstate->state;
3487 struct drm_plane *plane;
3488 const struct drm_plane_state *pstate;
3489 unsigned int total_data_rate = 0;
3491 if (WARN_ON(!state))
3492 return 0;
3494 /* Calculate and cache data rate for each plane */
3495 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
3496 enum plane_id plane_id = to_intel_plane(plane)->id;
3500 rate = skl_plane_relative_data_rate(intel_cstate,
3501 pstate, 0);
3502 plane_data_rate[plane_id] = rate;
3504 total_data_rate += rate;
3507 rate = skl_plane_relative_data_rate(intel_cstate,
3508 pstate, 1);
3509 plane_y_data_rate[plane_id] = rate;
3511 total_data_rate += rate;
3514 return total_data_rate;
3518 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3519 int y)
3521 struct drm_framebuffer *fb = pstate->fb;
3522 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3523 uint32_t src_w, src_h;
3524 uint32_t min_scanlines = 8;
3525 uint8_t plane_bpp;
3527 if (WARN_ON(!fb))
3528 return 0;
3530 /* For packed formats, no y-plane, return 0 */
3531 if (y && fb->format->format != DRM_FORMAT_NV12)
3532 return 0;
3534 /* For Non Y-tile return 8-blocks */
3535 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
3536 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
3537 return 8;
3539 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3540 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3542 if (drm_rotation_90_or_270(pstate->rotation))
3543 swap(src_w, src_h);
3545 /* Halve UV plane width and height for NV12 */
3546 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
3547 src_w /= 2;
3548 src_h /= 2;
3551 if (fb->format->format == DRM_FORMAT_NV12 && !y)
3552 plane_bpp = fb->format->cpp[1];
3554 plane_bpp = fb->format->cpp[0];
3556 if (drm_rotation_90_or_270(pstate->rotation)) {
3557 switch (plane_bpp) {
3558 case 1:
3559 min_scanlines = 32;
3560 break;
3561 case 2:
3562 min_scanlines = 16;
3563 break;
3564 case 4:
3565 min_scanlines = 8;
3566 break;
3567 case 8:
3568 min_scanlines = 4;
3569 break;
3570 default:
3571 WARN(1, "Unsupported pixel depth %u for rotation",
3572 plane_bpp);
3573 min_scanlines = 32;
3577 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
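/*
 * Worked example (hypothetical plane): a Y-tiled, unrotated 1920-wide
 * source with plane_bpp = 4 keeps min_scanlines = 8, giving
 * DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123 blocks.
 */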
3581 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
3582 uint16_t *minimum, uint16_t *y_minimum)
3584 const struct drm_plane_state *pstate;
3585 struct drm_plane *plane;
3587 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
3588 enum plane_id plane_id = to_intel_plane(plane)->id;
3590 if (plane_id == PLANE_CURSOR)
3591 continue;
3593 if (!pstate->visible)
3594 continue;
3596 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
3597 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
3600 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
3604 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3605 struct skl_ddb_allocation *ddb /* out */)
3607 struct drm_atomic_state *state = cstate->base.state;
3608 struct drm_crtc *crtc = cstate->base.crtc;
3609 struct drm_device *dev = crtc->dev;
3610 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3611 enum pipe pipe = intel_crtc->pipe;
3612 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
3613 uint16_t alloc_size, start;
3614 uint16_t minimum[I915_MAX_PLANES] = {};
3615 uint16_t y_minimum[I915_MAX_PLANES] = {};
3616 unsigned int total_data_rate;
3617 enum plane_id plane_id;
3619 unsigned plane_data_rate[I915_MAX_PLANES] = {};
3620 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
3622 /* Clear the partitioning for disabled planes. */
3623 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3624 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3626 if (WARN_ON(!state))
3627 return 0;
3629 if (!cstate->base.active) {
3630 alloc->start = alloc->end = 0;
3631 return 0;
3634 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3635 alloc_size = skl_ddb_entry_size(alloc);
3636 if (alloc_size == 0) {
3637 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3638 return 0;
3641 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
3644 * 1. Allocate the minimum required blocks for each active plane
3645 * and allocate the cursor, it doesn't require extra allocation
3646 * proportional to the data rate.
3649 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
3650 alloc_size -= minimum[plane_id];
3651 alloc_size -= y_minimum[plane_id];
3654 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
3655 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3658 * 2. Distribute the remaining space in proportion to the amount of
3659 * data each plane needs to fetch from memory.
3661 * FIXME: we may not allocate every single block here.
3663 total_data_rate = skl_get_total_relative_data_rate(cstate,
3666 if (total_data_rate == 0)
3667 return 0;
3669 start = alloc->start;
3670 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
3671 unsigned int data_rate, y_data_rate;
3672 uint16_t plane_blocks, y_plane_blocks = 0;
3674 if (plane_id == PLANE_CURSOR)
3675 continue;
3677 data_rate = plane_data_rate[plane_id];
3680 * allocation for (packed formats) or (uv-plane part of planar format):
3681 * promote the expression to 64 bits to avoid overflowing, the
3682 * result is < available as data_rate / total_data_rate < 1
3684 plane_blocks = minimum[plane_id];
3685 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3686 total_data_rate);
3688 /* Leave disabled planes at (0,0) */
3689 if (data_rate) {
3690 ddb->plane[pipe][plane_id].start = start;
3691 ddb->plane[pipe][plane_id].end = start + plane_blocks;
3694 start += plane_blocks;
3697 * allocation for y_plane part of planar format:
3699 y_data_rate = plane_y_data_rate[plane_id];
3701 y_plane_blocks = y_minimum[plane_id];
3702 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3703 total_data_rate);
3705 if (y_data_rate) {
3706 ddb->y_plane[pipe][plane_id].start = start;
3707 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
3710 start += y_plane_blocks;
3713 return 0;
3717 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3718 * for the read latency) and cpp should always be <= 8, so that
3719 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3720 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3722 static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
3723 uint32_t latency)
3725 uint32_t wm_intermediate_val;
3726 uint_fixed_16_16_t ret;
3728 if (latency == 0)
3729 return FP_16_16_MAX;
3731 wm_intermediate_val = latency * pixel_rate * cpp;
3732 ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
3734 return ret;
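/*
 * Worked example (hypothetical values): pixel_rate = 148500 kHz,
 * cpp = 4 and latency = 5us give
 * (5 * 148500 * 4) / (1000 * 512) = 2970000 / 512000, roughly 5.8
 * blocks in 16.16 fixed point (rounded up by the division helper).
 */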
3736 static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
3737 uint32_t pipe_htotal,
3738 uint32_t latency,
3739 uint_fixed_16_16_t plane_blocks_per_line)
3741 uint32_t wm_intermediate_val;
3742 uint_fixed_16_16_t ret;
3744 if (latency == 0)
3745 return FP_16_16_MAX;
3747 wm_intermediate_val = latency * pixel_rate;
3748 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
3749 pipe_htotal * 1000);
3750 ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
3752 return ret;
3754 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3755 struct intel_plane_state *pstate)
3757 uint64_t adjusted_pixel_rate;
3758 uint64_t downscale_amount;
3759 uint64_t pixel_rate;
3761 /* Shouldn't reach here on disabled planes... */
3762 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3763 return 0;
3766 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3767 * with additional adjustments for plane-specific scaling.
3769 adjusted_pixel_rate = cstate->pixel_rate;
3770 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
3772 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3773 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3775 return pixel_rate;
3778 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3779 struct intel_crtc_state *cstate,
3780 struct intel_plane_state *intel_pstate,
3781 uint16_t ddb_allocation,
3782 int level,
3783 uint16_t *out_blocks, /* out */
3784 uint8_t *out_lines, /* out */
3785 bool *enabled /* out */)
3787 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
3788 struct drm_plane_state *pstate = &intel_pstate->base;
3789 struct drm_framebuffer *fb = pstate->fb;
3790 uint32_t latency = dev_priv->wm.skl_latency[level];
3791 uint_fixed_16_16_t method1, method2;
3792 uint_fixed_16_16_t plane_blocks_per_line;
3793 uint_fixed_16_16_t selected_result;
3794 uint32_t interm_pbpl;
3795 uint32_t plane_bytes_per_line;
3796 uint32_t res_blocks, res_lines;
3797 uint8_t cpp;
3798 uint32_t width = 0, height = 0;
3799 uint32_t plane_pixel_rate;
3800 uint_fixed_16_16_t y_tile_minimum;
3801 uint32_t y_min_scanlines;
3802 struct intel_atomic_state *state =
3803 to_intel_atomic_state(cstate->base.state);
3804 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
3805 bool y_tiled, x_tiled;
3806 if (latency == 0 ||
3807 !cstate->base.active ||
3808 !intel_wm_plane_visible(cstate, intel_pstate)) {
3809 *enabled = false;
3810 return 0;
3813 y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
3814 fb->modifier == I915_FORMAT_MOD_Yf_TILED;
3815 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
3817 /* Display WA #1141: kbl. */
3818 if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
3819 latency += 4;
3821 if (apply_memory_bw_wa && x_tiled)
3822 latency += 15;
3824 if (plane->id == PLANE_CURSOR) {
3825 width = intel_pstate->base.crtc_w;
3826 height = intel_pstate->base.crtc_h;
3828 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3829 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3832 if (drm_rotation_90_or_270(pstate->rotation))
3833 swap(width, height);
3835 cpp = fb->format->cpp[0];
3836 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3838 if (drm_rotation_90_or_270(pstate->rotation)) {
3839 int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
3840 fb->format->cpp[1] :
3845 y_min_scanlines = 16;
3848 y_min_scanlines = 8;
3851 y_min_scanlines = 4;
3858 y_min_scanlines = 4;
3861 if (apply_memory_bw_wa)
3862 y_min_scanlines *= 2;
3864 plane_bytes_per_line = width * cpp;
3865 if (y_tiled) {
3866 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
3867 y_min_scanlines, 512);
3868 plane_blocks_per_line =
3869 fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
3870 } else if (x_tiled) {
3871 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
3872 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
3873 } else {
3874 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
3875 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
3878 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3879 method2 = skl_wm_method2(plane_pixel_rate,
3880 cstate->base.adjusted_mode.crtc_htotal,
3882 plane_blocks_per_line);
3884 y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
3885 plane_blocks_per_line);
3887 if (y_tiled) {
3888 selected_result = max_fixed_16_16(method2, y_tile_minimum);
3889 } else {
3890 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
3891 (plane_bytes_per_line / 512 < 1))
3892 selected_result = method2;
3893 else if ((ddb_allocation /
3894 fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
3895 selected_result = min_fixed_16_16(method1, method2);
3897 selected_result = method1;
3900 res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
3901 res_lines = DIV_ROUND_UP(selected_result.val,
3902 plane_blocks_per_line.val);
3904 if (level >= 1 && level <= 7) {
3905 if (y_tiled) {
3906 res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
3907 res_lines += y_min_scanlines;
3908 } else {
3909 res_blocks++;
3913 if (res_blocks >= ddb_allocation || res_lines > 31) {
3914 *enabled = false;
3917 * If there are no valid level 0 watermarks, then we can't
3918 * support this display configuration.
3920 if (level) {
3921 return 0;
3922 } else {
3923 struct drm_plane *plane = pstate->plane;
3925 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3926 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
3927 plane->base.id, plane->name,
3928 res_blocks, ddb_allocation, res_lines);
3929 return -EINVAL;
3933 *out_blocks = res_blocks;
3934 *out_lines = res_lines;
3935 *enabled = true;
3937 return 0;
3941 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3942 struct skl_ddb_allocation *ddb,
3943 struct intel_crtc_state *cstate,
3944 struct intel_plane *intel_plane,
3945 int level,
3946 struct skl_wm_level *result)
3948 struct drm_atomic_state *state = cstate->base.state;
3949 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3950 struct drm_plane *plane = &intel_plane->base;
3951 struct intel_plane_state *intel_pstate = NULL;
3952 uint16_t ddb_blocks;
3953 enum pipe pipe = intel_crtc->pipe;
3954 int ret;
3956 if (state)
3957 intel_pstate =
3958 intel_atomic_get_existing_plane_state(state,
3959 intel_plane);
3962 * Note: If we start supporting multiple pending atomic commits against
3963 * the same planes/CRTC's in the future, plane->state will no longer be
3964 * the correct pre-state to use for the calculations here and we'll
3965 * need to change where we get the 'unchanged' plane data from.
3967 * For now this is fine because we only allow one queued commit against
3968 * a CRTC. Even if the plane isn't modified by this transaction and we
3969 * don't have a plane lock, we still have the CRTC's lock, so we know
3970 * that no other transactions are racing with us to update it.
3972 if (!intel_pstate)
3973 intel_pstate = to_intel_plane_state(plane->state);
3975 WARN_ON(!intel_pstate->base.fb);
3977 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
3979 ret = skl_compute_plane_wm(dev_priv,
3980 cstate,
3981 intel_pstate,
3982 ddb_blocks,
3983 level,
3984 &result->plane_res_b,
3985 &result->plane_res_l,
3986 &result->plane_en);
3987 if (ret)
3988 return ret;
3990 return 0;
3994 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3996 struct drm_atomic_state *state = cstate->base.state;
3997 struct drm_i915_private *dev_priv = to_i915(state->dev);
3998 uint32_t pixel_rate;
3999 uint32_t linetime_wm;
4001 if (!cstate->base.active)
4002 return 0;
4004 pixel_rate = cstate->pixel_rate;
4006 if (WARN_ON(pixel_rate == 0))
4007 return 0;
4009 linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
4010 1000, pixel_rate);
4012 /* Display WA #1135: bxt. */
4013 if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
4014 linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
4016 return linetime_wm;
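/*
 * Worked example (hypothetical timings): crtc_htotal = 2200 at
 * pixel_rate = 148500 kHz gives
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119 (1/8th us units); on BXT
 * with IPC enabled the WA above would halve this to
 * DIV_ROUND_UP(119, 2) = 60.
 */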
4019 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
4020 struct skl_wm_level *trans_wm /* out */)
4022 if (!cstate->base.active)
4023 return;
4025 /* Until we know more, just disable transition WMs */
4026 trans_wm->plane_en = false;
4029 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
4030 struct skl_ddb_allocation *ddb,
4031 struct skl_pipe_wm *pipe_wm)
4033 struct drm_device *dev = cstate->base.crtc->dev;
4034 const struct drm_i915_private *dev_priv = to_i915(dev);
4035 struct intel_plane *intel_plane;
4036 struct skl_plane_wm *wm;
4037 int level, max_level = ilk_wm_max_level(dev_priv);
4041 * We'll only calculate watermarks for planes that are actually
4042 * enabled, so make sure all other planes are set as disabled.
4044 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
4046 for_each_intel_plane_mask(&dev_priv->drm,
4047 intel_plane,
4048 cstate->base.plane_mask) {
4049 wm = &pipe_wm->planes[intel_plane->id];
4051 for (level = 0; level <= max_level; level++) {
4052 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
4053 intel_plane, level,
4054 &wm->wm[level]);
4055 if (ret)
4056 return ret;
4058 skl_compute_transition_wm(cstate, &wm->trans_wm);
4060 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
4062 return 0;
4065 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
4066 i915_reg_t reg,
4067 const struct skl_ddb_entry *entry)
4069 if (entry->end)
4070 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
4071 else
4072 I915_WRITE(reg, 0);
4075 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
4076 i915_reg_t reg,
4077 const struct skl_wm_level *level)
4079 uint32_t val = 0;
4081 if (level->plane_en) {
4082 val |= PLANE_WM_EN;
4083 val |= level->plane_res_b;
4084 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
4087 I915_WRITE(reg, val);
4090 static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
4091 const struct skl_plane_wm *wm,
4092 const struct skl_ddb_allocation *ddb,
4093 enum plane_id plane_id)
4095 struct drm_crtc *crtc = &intel_crtc->base;
4096 struct drm_device *dev = crtc->dev;
4097 struct drm_i915_private *dev_priv = to_i915(dev);
4098 int level, max_level = ilk_wm_max_level(dev_priv);
4099 enum pipe pipe = intel_crtc->pipe;
4101 for (level = 0; level <= max_level; level++) {
4102 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
4103 &wm->wm[level]);
4105 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
4106 &wm->trans_wm);
4108 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
4109 &ddb->plane[pipe][plane_id]);
4110 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
4111 &ddb->y_plane[pipe][plane_id]);
4114 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
4115 const struct skl_plane_wm *wm,
4116 const struct skl_ddb_allocation *ddb)
4118 struct drm_crtc *crtc = &intel_crtc->base;
4119 struct drm_device *dev = crtc->dev;
4120 struct drm_i915_private *dev_priv = to_i915(dev);
4121 int level, max_level = ilk_wm_max_level(dev_priv);
4122 enum pipe pipe = intel_crtc->pipe;
4124 for (level = 0; level <= max_level; level++) {
4125 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
4126 &wm->wm[level]);
4128 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
4130 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
4131 &ddb->plane[pipe][PLANE_CURSOR]);
4134 bool skl_wm_level_equals(const struct skl_wm_level *l1,
4135 const struct skl_wm_level *l2)
4137 if (l1->plane_en != l2->plane_en)
4138 return false;
4140 /* If both planes aren't enabled, the rest shouldn't matter */
4141 if (!l1->plane_en)
4142 return true;
4144 return (l1->plane_res_l == l2->plane_res_l &&
4145 l1->plane_res_b == l2->plane_res_b);
4148 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
4149 const struct skl_ddb_entry *b)
4151 return a->start < b->end && b->start < a->end;
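/*
 * Illustrative worked example, not from the code above: the overlap
 * test treats entries as half-open [start, end) ranges, so two
 * allocations that merely touch do not conflict:
 *
 *   [0, 256) vs [256, 512) -> no overlap (256 < 256 fails)
 *   [0, 300) vs [256, 512) -> overlap    (0 < 512 and 256 < 300)
 */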
4154 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
4155 const struct skl_ddb_entry *ddb,
4156 int ignore)
4158 int i;
4160 for (i = 0; i < I915_MAX_PIPES; i++)
4161 if (i != ignore && entries[i] &&
4162 skl_ddb_entries_overlap(ddb, entries[i]))
4163 return true;
4165 return false;
4168 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
4169 const struct skl_pipe_wm *old_pipe_wm,
4170 struct skl_pipe_wm *pipe_wm, /* out */
4171 struct skl_ddb_allocation *ddb, /* out */
4172 bool *changed /* out */)
4174 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
4175 int ret;
4177 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
4178 if (ret)
4179 return ret;
4181 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
4182 *changed = false;
4183 else
4184 *changed = true;
4186 return 0;
4189 static uint32_t
4190 pipes_modified(struct drm_atomic_state *state)
4192 struct drm_crtc *crtc;
4193 struct drm_crtc_state *cstate;
4194 uint32_t i, ret = 0;
4196 for_each_new_crtc_in_state(state, crtc, cstate, i)
4197 ret |= drm_crtc_mask(crtc);
4199 return ret;
4202 static int
4203 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
4205 struct drm_atomic_state *state = cstate->base.state;
4206 struct drm_device *dev = state->dev;
4207 struct drm_crtc *crtc = cstate->base.crtc;
4208 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4209 struct drm_i915_private *dev_priv = to_i915(dev);
4210 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4211 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4212 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
4213 struct drm_plane_state *plane_state;
4214 struct drm_plane *plane;
4215 enum pipe pipe = intel_crtc->pipe;
4217 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
4219 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
4220 enum plane_id plane_id = to_intel_plane(plane)->id;
4222 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
4223 &new_ddb->plane[pipe][plane_id]) &&
4224 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
4225 &new_ddb->y_plane[pipe][plane_id]))
4226 continue;
4228 plane_state = drm_atomic_get_plane_state(state, plane);
4229 if (IS_ERR(plane_state))
4230 return PTR_ERR(plane_state);
4233 return 0;
4236 static int
4237 skl_compute_ddb(struct drm_atomic_state *state)
4239 struct drm_device *dev = state->dev;
4240 struct drm_i915_private *dev_priv = to_i915(dev);
4241 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4242 struct intel_crtc *intel_crtc;
4243 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
4244 uint32_t realloc_pipes = pipes_modified(state);
4245 int ret;
4248 * If this is our first atomic update following hardware readout,
4249 * we can't trust the DDB that the BIOS programmed for us. Let's
4250 * pretend that all pipes switched active status so that we'll
4251 * ensure a full DDB recompute.
4253 if (dev_priv->wm.distrust_bios_wm) {
4254 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4255 state->acquire_ctx);
4256 if (ret)
4257 return ret;
4259 intel_state->active_pipe_changes = ~0;
4262 * We usually only initialize intel_state->active_crtcs if we're
4263 * doing a modeset; make sure this field is always
4264 * initialized during the sanitization process that happens
4265 * on the first commit too.
4267 if (!intel_state->modeset)
4268 intel_state->active_crtcs = dev_priv->active_crtcs;
4272 * If the modeset changes which CRTC's are active, we need to
4273 * recompute the DDB allocation for *all* active pipes, even
4274 * those that weren't otherwise being modified in any way by this
4275 * atomic commit. Due to the shrinking of the per-pipe allocations
4276 * when new active CRTC's are added, it's possible for a pipe that
4277 * we were already using and aren't changing at all here to suddenly
4278 * become invalid if its DDB needs exceeds its new allocation.
4280 * Note that if we wind up doing a full DDB recompute, we can't let
4281 * any other display updates race with this transaction, so we need
4282 * to grab the lock on *all* CRTC's.
4284 if (intel_state->active_pipe_changes) {
4285 realloc_pipes = ~0;
4286 intel_state->wm_results.dirty_pipes = ~0;
4290 * We're not recomputing for the pipes not included in the commit, so
4291 * make sure we start with the current state.
4293 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4295 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4296 struct intel_crtc_state *cstate;
4298 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
4299 if (IS_ERR(cstate))
4300 return PTR_ERR(cstate);
4302 ret = skl_allocate_pipe_ddb(cstate, ddb);
4303 if (ret)
4304 return ret;
4306 ret = skl_ddb_add_affected_planes(cstate);
4307 if (ret)
4308 return ret;
4311 return 0;
4314 static void
4315 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4316 struct skl_wm_values *src,
4317 enum pipe pipe)
4319 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4320 sizeof(dst->ddb.y_plane[pipe]));
4321 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4322 sizeof(dst->ddb.plane[pipe]));
4325 static void
4326 skl_print_wm_changes(const struct drm_atomic_state *state)
4328 const struct drm_device *dev = state->dev;
4329 const struct drm_i915_private *dev_priv = to_i915(dev);
4330 const struct intel_atomic_state *intel_state =
4331 to_intel_atomic_state(state);
4332 const struct drm_crtc *crtc;
4333 const struct drm_crtc_state *cstate;
4334 const struct intel_plane *intel_plane;
4335 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
4336 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4337 int i;
4339 for_each_new_crtc_in_state(state, crtc, cstate, i) {
4340 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4341 enum pipe pipe = intel_crtc->pipe;
4343 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
4344 enum plane_id plane_id = intel_plane->id;
4345 const struct skl_ddb_entry *old, *new;
4347 old = &old_ddb->plane[pipe][plane_id];
4348 new = &new_ddb->plane[pipe][plane_id];
4350 if (skl_ddb_entry_equal(old, new))
4351 continue;
4353 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
4354 intel_plane->base.base.id,
4355 intel_plane->base.name,
4356 old->start, old->end,
4357 new->start, new->end);
4362 static int
4363 skl_compute_wm(struct drm_atomic_state *state)
4365 struct drm_crtc *crtc;
4366 struct drm_crtc_state *cstate;
4367 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4368 struct skl_wm_values *results = &intel_state->wm_results;
4369 struct skl_pipe_wm *pipe_wm;
4370 bool changed = false;
4371 int ret, i;
4374 * If this transaction isn't actually touching any CRTC's, don't
4375 * bother with watermark calculation. Note that if we pass this
4376 * test, we're guaranteed to hold at least one CRTC state mutex,
4377 * which means we can safely use values like dev_priv->active_crtcs
4378 * since any racing commits that want to update them would need to
4379 * hold _all_ CRTC state mutexes.
4381 for_each_new_crtc_in_state(state, crtc, cstate, i)
4382 changed = true;
4383 if (!changed)
4384 return 0;
4386 /* Clear all dirty flags */
4387 results->dirty_pipes = 0;
4389 ret = skl_compute_ddb(state);
4390 if (ret)
4391 return ret;
4394 * Calculate WM's for all pipes that are part of this transaction.
4395 * Note that the DDB allocation above may have added more CRTC's that
4396 * weren't otherwise being modified (and set bits in dirty_pipes) if
4397 * pipe allocations had to change.
4399 * FIXME: Now that we're doing this in the atomic check phase, we
4400 * should allow skl_update_pipe_wm() to return failure in cases where
4401 * no suitable watermark values can be found.
4403 for_each_new_crtc_in_state(state, crtc, cstate, i) {
4404 struct intel_crtc_state *intel_cstate =
4405 to_intel_crtc_state(cstate);
4406 const struct skl_pipe_wm *old_pipe_wm =
4407 &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
4409 pipe_wm = &intel_cstate->wm.skl.optimal;
4410 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
4411 &results->ddb, &changed);
4412 if (ret)
4413 return ret;
4415 if (changed)
4416 results->dirty_pipes |= drm_crtc_mask(crtc);
4418 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4419 /* This pipe's WM's did not change */
4420 continue;
4422 intel_cstate->update_wm_pre = true;
4425 skl_print_wm_changes(state);
4427 return 0;
4430 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
4431 struct intel_crtc_state *cstate)
4433 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
4434 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4435 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4436 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
4437 enum pipe pipe = crtc->pipe;
4438 enum plane_id plane_id;
4440 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
4441 return;
4443 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
4445 for_each_plane_id_on_crtc(crtc, plane_id) {
4446 if (plane_id != PLANE_CURSOR)
4447 skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
4448 ddb, plane_id);
4449 else
4450 skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
4451 ddb);
4455 static void skl_initial_wm(struct intel_atomic_state *state,
4456 struct intel_crtc_state *cstate)
4458 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4459 struct drm_device *dev = intel_crtc->base.dev;
4460 struct drm_i915_private *dev_priv = to_i915(dev);
4461 struct skl_wm_values *results = &state->wm_results;
4462 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4463 enum pipe pipe = intel_crtc->pipe;
4465 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
4466 return;
4468 mutex_lock(&dev_priv->wm.wm_mutex);
4470 if (cstate->base.active_changed)
4471 skl_atomic_update_crtc_wm(state, cstate);
4473 skl_copy_wm_for_pipe(hw_vals, results, pipe);
4475 mutex_unlock(&dev_priv->wm.wm_mutex);
4478 static void ilk_compute_wm_config(struct drm_device *dev,
4479 struct intel_wm_config *config)
4481 struct intel_crtc *crtc;
4483 /* Compute the currently _active_ config */
4484 for_each_intel_crtc(dev, crtc) {
4485 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
4487 if (!wm->pipe_enabled)
4488 continue;
4490 config->sprites_enabled |= wm->sprites_enabled;
4491 config->sprites_scaled |= wm->sprites_scaled;
4492 config->num_pipes_active++;
4496 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4498 struct drm_device *dev = &dev_priv->drm;
4499 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4500 struct ilk_wm_maximums max;
4501 struct intel_wm_config config = {};
4502 struct ilk_wm_values results = {};
4503 enum intel_ddb_partitioning partitioning;
4505 ilk_compute_wm_config(dev, &config);
4507 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4508 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
4510 /* 5/6 split only in single pipe config on IVB+ */
4511 if (INTEL_GEN(dev_priv) >= 7 &&
4512 config.num_pipes_active == 1 && config.sprites_enabled) {
4513 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
4514 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
4516 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
4517 } else {
4518 best_lp_wm = &lp_wm_1_2;
4521 partitioning = (best_lp_wm == &lp_wm_1_2) ?
4522 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
4524 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
4526 ilk_write_wm_values(dev_priv, &results);
4529 static void ilk_initial_watermarks(struct intel_atomic_state *state,
4530 struct intel_crtc_state *cstate)
4532 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4533 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4535 mutex_lock(&dev_priv->wm.wm_mutex);
4536 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
4537 ilk_program_watermarks(dev_priv);
4538 mutex_unlock(&dev_priv->wm.wm_mutex);
4541 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
4542 struct intel_crtc_state *cstate)
4544 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4545 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4547 mutex_lock(&dev_priv->wm.wm_mutex);
4548 if (cstate->wm.need_postvbl_update) {
4549 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4550 ilk_program_watermarks(dev_priv);
4552 mutex_unlock(&dev_priv->wm.wm_mutex);
4555 static inline void skl_wm_level_from_reg_val(uint32_t val,
4556 struct skl_wm_level *level)
4558 level->plane_en = val & PLANE_WM_EN;
4559 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
4560 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
4561 PLANE_WM_LINES_MASK;
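/*
 * Illustrative sketch, not part of the driver: this helper is the
 * inverse of the packing done in skl_write_wm_level(). Assuming the
 * PLANE_WM_* masks and shifts from i915_reg.h, an enabled level of
 * 42 blocks and 3 lines round-trips as
 *
 *   val = PLANE_WM_EN | (3 << PLANE_WM_LINES_SHIFT) | 42
 *
 * and decoding masks the blocks field back out of the low bits and
 * shifts the lines field down again, exactly as above.
 */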
4564 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
4565 struct skl_pipe_wm *out)
4567 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4569 enum pipe pipe = intel_crtc->pipe;
4570 int level, max_level;
4571 enum plane_id plane_id;
4572 uint32_t val;
4574 max_level = ilk_wm_max_level(dev_priv);
4576 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4577 struct skl_plane_wm *wm = &out->planes[plane_id];
4579 for (level = 0; level <= max_level; level++) {
4580 if (plane_id != PLANE_CURSOR)
4581 val = I915_READ(PLANE_WM(pipe, plane_id, level));
4582 else
4583 val = I915_READ(CUR_WM(pipe, level));
4585 skl_wm_level_from_reg_val(val, &wm->wm[level]);
4588 if (plane_id != PLANE_CURSOR)
4589 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
4590 else
4591 val = I915_READ(CUR_WM_TRANS(pipe));
4593 skl_wm_level_from_reg_val(val, &wm->trans_wm);
4596 if (!intel_crtc->active)
4597 return;
4599 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
4602 void skl_wm_get_hw_state(struct drm_device *dev)
4604 struct drm_i915_private *dev_priv = to_i915(dev);
4605 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4606 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4607 struct drm_crtc *crtc;
4608 struct intel_crtc *intel_crtc;
4609 struct intel_crtc_state *cstate;
4611 skl_ddb_get_hw_state(dev_priv, ddb);
4612 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4613 intel_crtc = to_intel_crtc(crtc);
4614 cstate = to_intel_crtc_state(crtc->state);
4616 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
4618 if (intel_crtc->active)
4619 hw->dirty_pipes |= drm_crtc_mask(crtc);
4622 if (dev_priv->active_crtcs) {
4623 /* Fully recompute DDB on first atomic commit */
4624 dev_priv->wm.distrust_bios_wm = true;
4625 } else {
4626 /* Easy/common case; just sanitize DDB now if everything off */
4627 memset(ddb, 0, sizeof(*ddb));
4631 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4633 struct drm_device *dev = crtc->dev;
4634 struct drm_i915_private *dev_priv = to_i915(dev);
4635 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4636 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4637 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4638 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4639 enum pipe pipe = intel_crtc->pipe;
4640 static const i915_reg_t wm0_pipe_reg[] = {
4641 [PIPE_A] = WM0_PIPEA_ILK,
4642 [PIPE_B] = WM0_PIPEB_ILK,
4643 [PIPE_C] = WM0_PIPEC_IVB,
4646 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4647 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4648 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4650 memset(active, 0, sizeof(*active));
4652 active->pipe_enabled = intel_crtc->active;
4654 if (active->pipe_enabled) {
4655 u32 tmp = hw->wm_pipe[pipe];
4658 * For active pipes LP0 watermark is marked as
4659 * enabled, and LP1+ watermarks as disabled since
4660 * we can't really reverse compute them in case
4661 * multiple pipes are active.
4663 active->wm[0].enable = true;
4664 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4665 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4666 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4667 active->linetime = hw->wm_linetime[pipe];
4668 } else {
4669 int level, max_level = ilk_wm_max_level(dev_priv);
4672 * For inactive pipes, all watermark levels
4673 * should be marked as enabled but zeroed,
4674 * which is what we'd compute them to.
4676 for (level = 0; level <= max_level; level++)
4677 active->wm[level].enable = true;
4680 intel_crtc->wm.active.ilk = *active;
4683 #define _FW_WM(value, plane) \
4684 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4685 #define _FW_WM_VLV(value, plane) \
4686 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
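/*
 * Illustrative expansion, not part of the driver: the macros above
 * isolate one watermark field from a DSPFW register value via token
 * pasting, e.g. _FW_WM(tmp, CURSORB) becomes
 *
 *   ((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT
 *
 * while the _VLV variant only differs in using the VLV-specific
 * DSPFW_*_MASK_VLV macros.
 */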
4688 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4689 struct vlv_wm_values *wm)
4691 enum pipe pipe;
4692 uint32_t tmp;
4694 for_each_pipe(dev_priv, pipe) {
4695 tmp = I915_READ(VLV_DDL(pipe));
4697 wm->ddl[pipe].plane[PLANE_PRIMARY] =
4698 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4699 wm->ddl[pipe].plane[PLANE_CURSOR] =
4700 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4701 wm->ddl[pipe].plane[PLANE_SPRITE0] =
4702 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4703 wm->ddl[pipe].plane[PLANE_SPRITE1] =
4704 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4707 tmp = I915_READ(DSPFW1);
4708 wm->sr.plane = _FW_WM(tmp, SR);
4709 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
4710 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
4711 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
4713 tmp = I915_READ(DSPFW2);
4714 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
4715 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
4716 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
4718 tmp = I915_READ(DSPFW3);
4719 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4721 if (IS_CHERRYVIEW(dev_priv)) {
4722 tmp = I915_READ(DSPFW7_CHV);
4723 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4724 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4726 tmp = I915_READ(DSPFW8_CHV);
4727 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
4728 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
4730 tmp = I915_READ(DSPFW9_CHV);
4731 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
4732 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
4734 tmp = I915_READ(DSPHOWM);
4735 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4736 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4737 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4738 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
4739 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4740 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4741 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4742 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4743 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4744 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
4745 } else {
4746 tmp = I915_READ(DSPFW7);
4747 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4748 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4750 tmp = I915_READ(DSPHOWM);
4751 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4752 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4753 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4754 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4755 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4756 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4757 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
4761 #undef _FW_WM
4762 #undef _FW_WM_VLV
4764 void vlv_wm_get_hw_state(struct drm_device *dev)
4766 struct drm_i915_private *dev_priv = to_i915(dev);
4767 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4768 struct intel_crtc *crtc;
4769 u32 val;
4771 vlv_read_wm_values(dev_priv, wm);
4773 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4774 wm->level = VLV_WM_LEVEL_PM2;
4776 if (IS_CHERRYVIEW(dev_priv)) {
4777 mutex_lock(&dev_priv->rps.hw_lock);
4779 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4780 if (val & DSP_MAXFIFO_PM5_ENABLE)
4781 wm->level = VLV_WM_LEVEL_PM5;
4784 * If DDR DVFS is disabled in the BIOS, Punit
4785 * will never ack the request. So if that happens
4786 * assume we don't have to enable/disable DDR DVFS
4787 * dynamically. To test that just set the REQ_ACK
4788 * bit to poke the Punit, but don't change the
4789 * HIGH/LOW bits so that we don't actually change
4790 * the current state.
4792 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4793 val |= FORCE_DDR_FREQ_REQ_ACK;
4794 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4796 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4797 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4798 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4799 "assuming DDR DVFS is disabled\n");
4800 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4801 } else {
4802 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4803 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4804 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4807 mutex_unlock(&dev_priv->rps.hw_lock);
4810 for_each_intel_crtc(dev, crtc) {
4811 struct intel_crtc_state *crtc_state =
4812 to_intel_crtc_state(crtc->base.state);
4813 struct vlv_wm_state *active = &crtc->wm.active.vlv;
4814 const struct vlv_fifo_state *fifo_state =
4815 &crtc_state->wm.vlv.fifo_state;
4816 enum pipe pipe = crtc->pipe;
4817 enum plane_id plane_id;
4818 int level;
4820 vlv_get_fifo_size(crtc_state);
4822 active->num_levels = wm->level + 1;
4823 active->cxsr = wm->cxsr;
4825 for (level = 0; level < active->num_levels; level++) {
4826 struct g4x_pipe_wm *raw =
4827 &crtc_state->wm.vlv.raw[level];
4829 active->sr[level].plane = wm->sr.plane;
4830 active->sr[level].cursor = wm->sr.cursor;
4832 for_each_plane_id_on_crtc(crtc, plane_id) {
4833 active->wm[level].plane[plane_id] =
4834 wm->pipe[pipe].plane[plane_id];
4836 raw->plane[plane_id] =
4837 vlv_invert_wm_value(active->wm[level].plane[plane_id],
4838 fifo_state->plane[plane_id]);
4842 for_each_plane_id_on_crtc(crtc, plane_id)
4843 vlv_raw_plane_wm_set(crtc_state, level,
4844 plane_id, USHRT_MAX);
4845 vlv_invalidate_wms(crtc, active, level);
4847 crtc_state->wm.vlv.optimal = *active;
4848 crtc_state->wm.vlv.intermediate = *active;
4850 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4852 wm->pipe[pipe].plane[PLANE_PRIMARY],
4853 wm->pipe[pipe].plane[PLANE_CURSOR],
4854 wm->pipe[pipe].plane[PLANE_SPRITE0],
4855 wm->pipe[pipe].plane[PLANE_SPRITE1]);
4858 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4859 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4862 void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
4864 struct intel_plane *plane;
4865 struct intel_crtc *crtc;
4867 mutex_lock(&dev_priv->wm.wm_mutex);
4869 for_each_intel_plane(&dev_priv->drm, plane) {
4870 struct intel_crtc *crtc =
4871 intel_get_crtc_for_pipe(dev_priv, plane->pipe);
4872 struct intel_crtc_state *crtc_state =
4873 to_intel_crtc_state(crtc->base.state);
4874 struct intel_plane_state *plane_state =
4875 to_intel_plane_state(plane->base.state);
4876 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
4877 const struct vlv_fifo_state *fifo_state =
4878 &crtc_state->wm.vlv.fifo_state;
4879 enum plane_id plane_id = plane->id;
4880 int level;
4882 if (plane_state->base.visible)
4883 continue;
4885 for (level = 0; level < wm_state->num_levels; level++) {
4886 struct g4x_pipe_wm *raw =
4887 &crtc_state->wm.vlv.raw[level];
4889 raw->plane[plane_id] = 0;
4891 wm_state->wm[level].plane[plane_id] =
4892 vlv_invert_wm_value(raw->plane[plane_id],
4893 fifo_state->plane[plane_id]);
4897 for_each_intel_crtc(&dev_priv->drm, crtc) {
4898 struct intel_crtc_state *crtc_state =
4899 to_intel_crtc_state(crtc->base.state);
4901 crtc_state->wm.vlv.intermediate =
4902 crtc_state->wm.vlv.optimal;
4903 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
4906 vlv_program_watermarks(dev_priv);
4908 mutex_unlock(&dev_priv->wm.wm_mutex);
4911 void ilk_wm_get_hw_state(struct drm_device *dev)
4913 struct drm_i915_private *dev_priv = to_i915(dev);
4914 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4915 struct drm_crtc *crtc;
4917 for_each_crtc(dev, crtc)
4918 ilk_pipe_wm_get_hw_state(crtc);
4920 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4921 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4922 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4924 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4925 if (INTEL_GEN(dev_priv) >= 7) {
4926 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4927 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4930 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4931 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4932 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4933 else if (IS_IVYBRIDGE(dev_priv))
4934 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4935 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4937 hw->enable_fbc_wm =
4938 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4942 * intel_update_watermarks - update FIFO watermark values based on current modes
4944 * Calculate watermark values for the various WM regs based on current mode
4945 * and plane configuration.
4947 * There are several cases to deal with here:
4948 * - normal (i.e. non-self-refresh)
4949 * - self-refresh (SR) mode
4950 * - lines are large relative to FIFO size (buffer can hold up to 2)
4951 * - lines are small relative to FIFO size (buffer can hold more than 2
4952 * lines), so need to account for TLB latency
4954 * The normal calculation is:
4955 * watermark = dotclock * bytes per pixel * latency
4956 * where latency is platform & configuration dependent (we assume pessimal
4957 * values here).
4959 * The SR calculation is:
4960 * watermark = (trunc(latency/line time)+1) * surface width *
4961 * bytes per pixel
4962 * where
4963 * line time = htotal / dotclock
4964 * surface width = hdisplay for normal plane and 64 for cursor
4965 * and latency is assumed to be high, as above.
4967 * The final value programmed to the register should always be rounded up,
4968 * and include an extra 2 entries to account for clock crossings.
4970 * We don't use the sprite, so we can ignore that. And on Crestline we have
4971 * to set the non-SR watermarks to 8.
4973 void intel_update_watermarks(struct intel_crtc *crtc)
4975 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4977 if (dev_priv->display.update_wm)
4978 dev_priv->display.update_wm(crtc);
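/*
 * Illustrative worked example, not from the docs above: plugging
 * hypothetical numbers into the "normal" method, a 148500 kHz dot
 * clock at 4 bytes per pixel with an assumed 12us latency drains
 *
 *   148500 kHz * 4 B * 12 us / 1000 = 7128 bytes
 *
 * during the latency window, i.e. 112 64-byte FIFO entries after
 * rounding up, plus the 2 extra entries for clock crossings noted
 * above. The helper below restates that arithmetic; its name is made
 * up for illustration.
 */
static inline unsigned int example_wm_method1(unsigned int pixel_rate_khz,
					      unsigned int cpp,
					      unsigned int latency_us)
{
	unsigned int bytes = pixel_rate_khz * cpp * latency_us / 1000;

	/* FIFO entries are 64 bytes; round up */
	return (bytes + 63) / 64;
}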
4982 * Lock protecting IPS related data structures
4984 DEFINE_SPINLOCK(mchdev_lock);
4986 /* Global for IPS driver to get at the current i915 device. Protected by
4987 * mchdev_lock. */
4988 static struct drm_i915_private *i915_mch_dev;
4990 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4992 u16 rgvswctl;
4994 lockdep_assert_held(&mchdev_lock);
4996 rgvswctl = I915_READ16(MEMSWCTL);
4997 if (rgvswctl & MEMCTL_CMD_STS) {
4998 DRM_DEBUG("gpu busy, RCS change rejected\n");
4999 return false; /* still busy with another command */
5002 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5003 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5004 I915_WRITE16(MEMSWCTL, rgvswctl);
5005 POSTING_READ16(MEMSWCTL);
5007 rgvswctl |= MEMCTL_CMD_STS;
5008 I915_WRITE16(MEMSWCTL, rgvswctl);
5010 return true;
5013 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
5015 u32 rgvmodectl;
5016 u8 fmax, fmin, fstart, vstart;
5018 spin_lock_irq(&mchdev_lock);
5020 rgvmodectl = I915_READ(MEMMODECTL);
5022 /* Enable temp reporting */
5023 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
5024 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
5026 /* 100ms RC evaluation intervals */
5027 I915_WRITE(RCUPEI, 100000);
5028 I915_WRITE(RCDNEI, 100000);
5030 /* Set max/min thresholds to 90ms and 80ms respectively */
5031 I915_WRITE(RCBMAXAVG, 90000);
5032 I915_WRITE(RCBMINAVG, 80000);
5034 I915_WRITE(MEMIHYST, 1);
5036 /* Set up min, max, and cur for interrupt handling */
5037 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
5038 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5039 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5040 MEMMODE_FSTART_SHIFT;
5042 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
5043 PXVFREQ_PX_SHIFT;
5045 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
5046 dev_priv->ips.fstart = fstart;
5048 dev_priv->ips.max_delay = fstart;
5049 dev_priv->ips.min_delay = fmin;
5050 dev_priv->ips.cur_delay = fstart;
5052 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5053 fmax, fmin, fstart);
5055 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5058 * Interrupts will be enabled in ironlake_irq_postinstall
5061 I915_WRITE(VIDSTART, vstart);
5062 POSTING_READ(VIDSTART);
5064 rgvmodectl |= MEMMODE_SWMODE_EN;
5065 I915_WRITE(MEMMODECTL, rgvmodectl);
5067 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
5068 DRM_ERROR("stuck trying to change perf mode\n");
5071 ironlake_set_drps(dev_priv, fstart);
5073 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
5074 I915_READ(DDREC) + I915_READ(CSIEC);
5075 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
5076 dev_priv->ips.last_count2 = I915_READ(GFXEC);
5077 dev_priv->ips.last_time2 = ktime_get_raw_ns();
5079 spin_unlock_irq(&mchdev_lock);
5082 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
5084 u16 rgvswctl;
5086 spin_lock_irq(&mchdev_lock);
5088 rgvswctl = I915_READ16(MEMSWCTL);
5090 /* Ack interrupts, disable EFC interrupt */
5091 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
5092 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
5093 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
5094 I915_WRITE(DEIIR, DE_PCU_EVENT);
5095 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
5097 /* Go back to the starting frequency */
5098 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
5100 rgvswctl |= MEMCTL_CMD_STS;
5101 I915_WRITE(MEMSWCTL, rgvswctl);
5102 mdelay(1);
5104 spin_unlock_irq(&mchdev_lock);
5107 /* There's a funny hw issue where the hw returns all 0 when reading from
5108 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
5109 * ourselves, instead of doing a rmw cycle (which might result in us clearing
5110 * all limits and leaving the GPU stuck at whatever frequency it is at the moment).
5112 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
5114 u32 limits;
5116 /* Only set the down limit when we've reached the lowest level to avoid
5117 * getting more interrupts, otherwise leave this clear. This prevents a
5118 * race in the hw when coming out of rc6: There's a tiny window where
5119 * the hw runs at the minimal clock before selecting the desired
5120 * frequency, if the down threshold expires in that window we will not
5121 * receive a down interrupt. */
5122 if (IS_GEN9(dev_priv)) {
5123 limits = (dev_priv->rps.max_freq_softlimit) << 23;
5124 if (val <= dev_priv->rps.min_freq_softlimit)
5125 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
5127 limits = dev_priv->rps.max_freq_softlimit << 24;
5128 if (val <= dev_priv->rps.min_freq_softlimit)
5129 limits |= dev_priv->rps.min_freq_softlimit << 16;
5132 return limits;
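/*
 * Illustrative worked example, not from the code above: pre-gen9 the
 * limits word packs max << 24 | min << 16 in 50MHz units, while gen9
 * uses bit positions 23 and 14 because the softlimits are kept in
 * 16.66MHz units (values three times larger). Note the down limit is
 * only included once 'val' has reached the minimum, to avoid the rc6
 * exit race described above.
 */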
5135 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
5137 int new_power;
5138 u32 threshold_up = 0, threshold_down = 0; /* in % */
5139 u32 ei_up = 0, ei_down = 0;
5141 new_power = dev_priv->rps.power;
5142 switch (dev_priv->rps.power) {
5143 case LOW_POWER:
5144 if (val > dev_priv->rps.efficient_freq + 1 &&
5145 val > dev_priv->rps.cur_freq)
5146 new_power = BETWEEN;
5147 break;
5149 case BETWEEN:
5150 if (val <= dev_priv->rps.efficient_freq &&
5151 val < dev_priv->rps.cur_freq)
5152 new_power = LOW_POWER;
5153 else if (val >= dev_priv->rps.rp0_freq &&
5154 val > dev_priv->rps.cur_freq)
5155 new_power = HIGH_POWER;
5156 break;
5158 case HIGH_POWER:
5159 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
5160 val < dev_priv->rps.cur_freq)
5161 new_power = BETWEEN;
5162 break;
5163 }
5164 /* Max/min bins are special */
5165 if (val <= dev_priv->rps.min_freq_softlimit)
5166 new_power = LOW_POWER;
5167 if (val >= dev_priv->rps.max_freq_softlimit)
5168 new_power = HIGH_POWER;
5169 if (new_power == dev_priv->rps.power)
5170 return;
5172 /* Note the units here are not exactly 1us, but 1280ns. */
5173 switch (new_power) {
5174 case LOW_POWER:
5175 /* Upclock if more than 95% busy over 16ms */
5176 ei_up = 16000;
5177 threshold_up = 95;
5179 /* Downclock if less than 85% busy over 32ms */
5180 ei_down = 32000;
5181 threshold_down = 85;
5182 break;
5184 case BETWEEN:
5185 /* Upclock if more than 90% busy over 13ms */
5186 ei_up = 13000;
5187 threshold_up = 90;
5189 /* Downclock if less than 75% busy over 32ms */
5190 ei_down = 32000;
5191 threshold_down = 75;
5192 break;
5194 case HIGH_POWER:
5195 /* Upclock if more than 85% busy over 10ms */
5196 ei_up = 10000;
5197 threshold_up = 85;
5199 /* Downclock if less than 60% busy over 32ms */
5200 ei_down = 32000;
5201 threshold_down = 60;
5202 break;
5203 }
5205 /* When byt can survive without system hang with dynamic
5206 * sw freq adjustments, this restriction can be lifted.
5208 if (IS_VALLEYVIEW(dev_priv))
5209 goto skip_hw_write;
5211 I915_WRITE(GEN6_RP_UP_EI,
5212 GT_INTERVAL_FROM_US(dev_priv, ei_up));
5213 I915_WRITE(GEN6_RP_UP_THRESHOLD,
5214 GT_INTERVAL_FROM_US(dev_priv,
5215 ei_up * threshold_up / 100));
5217 I915_WRITE(GEN6_RP_DOWN_EI,
5218 GT_INTERVAL_FROM_US(dev_priv, ei_down));
5219 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
5220 GT_INTERVAL_FROM_US(dev_priv,
5221 ei_down * threshold_down / 100));
5223 I915_WRITE(GEN6_RP_CONTROL,
5224 GEN6_RP_MEDIA_TURBO |
5225 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5226 GEN6_RP_MEDIA_IS_GFX |
5227 GEN6_RP_ENABLE |
5228 GEN6_RP_UP_BUSY_AVG |
5229 GEN6_RP_DOWN_IDLE_AVG);
5231 skip_hw_write:
5232 dev_priv->rps.power = new_power;
5233 dev_priv->rps.up_threshold = threshold_up;
5234 dev_priv->rps.down_threshold = threshold_down;
5235 dev_priv->rps.last_adj = 0;
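/*
 * Illustrative worked example, not from the code above: the evaluation
 * intervals are programmed in 1280ns hardware units, which is what
 * GT_INTERVAL_FROM_US() converts to. The LOW_POWER 16ms up interval is
 *
 *   16000 us * 1000 / 1280 ns = 12500 units
 *
 * and its 95% up threshold is then 12500 * 95 / 100 = 11875 units.
 */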
5238 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
5240 u32 mask = 0;
5242 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
5243 if (val > dev_priv->rps.min_freq_softlimit)
5244 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
5245 if (val < dev_priv->rps.max_freq_softlimit)
5246 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
5248 mask &= dev_priv->pm_rps_events;
5250 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
5253 /* gen6_set_rps is called to update the frequency request, but should also be
5254 * called when the range (min_delay and max_delay) is modified so that we can
5255 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
5256 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
5258 /* min/max delay may still have been modified so be sure to
5259 * write the limits value.
5261 if (val != dev_priv->rps.cur_freq) {
5262 gen6_set_rps_thresholds(dev_priv, val);
5264 if (IS_GEN9(dev_priv))
5265 I915_WRITE(GEN6_RPNSWREQ,
5266 GEN9_FREQUENCY(val));
5267 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5268 I915_WRITE(GEN6_RPNSWREQ,
5269 HSW_FREQUENCY(val));
5271 I915_WRITE(GEN6_RPNSWREQ,
5272 GEN6_FREQUENCY(val) |
5273 GEN6_OFFSET(0) |
5274 GEN6_AGGRESSIVE_TURBO);
5277 /* Make sure we continue to get interrupts
5278 * until we hit the minimum or maximum frequencies.
5280 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
5281 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
5283 dev_priv->rps.cur_freq = val;
5284 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
5286 return 0;
5289 static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
5291 int err;
5293 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
5294 "Odd GPU freq value\n"))
5295 val &= ~1;
5297 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
5299 if (val != dev_priv->rps.cur_freq) {
5300 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
5301 if (err)
5302 return err;
5304 gen6_set_rps_thresholds(dev_priv, val);
5307 dev_priv->rps.cur_freq = val;
5308 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
5310 return 0;
5313 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
5315 * * If Gfx is Idle, then
5316 * 1. Forcewake Media well.
5317 * 2. Request idle freq.
5318 * 3. Release Forcewake of Media well.
5320 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
5322 u32 val = dev_priv->rps.idle_freq;
5323 int err;
5325 if (dev_priv->rps.cur_freq <= val)
5326 return;
5328 /* The punit delays the write of the frequency and voltage until it
5329 * determines the GPU is awake. During normal usage we don't want to
5330 * waste power changing the frequency if the GPU is sleeping (rc6).
5331 * However, the GPU and driver is now idle and we do not want to delay
5332 * switching to minimum voltage (reducing power whilst idle) as we do
5333 * not expect to be woken in the near future and so must flush the
5334 * change by waking the device.
5336 * We choose to take the media powerwell (either would do to trick the
5337 * punit into committing the voltage change) as that takes a lot less
5338 * power than the render powerwell.
5340 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
5341 err = valleyview_set_rps(dev_priv, val);
5342 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
5345 DRM_ERROR("Failed to set RPS for idle\n");
5348 void gen6_rps_busy(struct drm_i915_private *dev_priv)
5350 mutex_lock(&dev_priv->rps.hw_lock);
5351 if (dev_priv->rps.enabled) {
5352 u8 freq;
5354 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
5355 gen6_rps_reset_ei(dev_priv);
5356 I915_WRITE(GEN6_PMINTRMSK,
5357 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
5359 gen6_enable_rps_interrupts(dev_priv);
5361 /* Use the user's desired frequency as a guide, but for better
5362 * performance, jump directly to RPe as our starting frequency.
5364 freq = max(dev_priv->rps.cur_freq,
5365 dev_priv->rps.efficient_freq);
5367 if (intel_set_rps(dev_priv,
5368 clamp(freq,
5369 dev_priv->rps.min_freq_softlimit,
5370 dev_priv->rps.max_freq_softlimit)))
5371 DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
5373 mutex_unlock(&dev_priv->rps.hw_lock);
5376 void gen6_rps_idle(struct drm_i915_private *dev_priv)
5378 /* Flush our bottom-half so that it does not race with us
5379 * setting the idle frequency and so that it is bounded by
5380 * our rpm wakeref. And then disable the interrupts to stop any
5381 * further RPS reclocking whilst we are asleep.
5383 gen6_disable_rps_interrupts(dev_priv);
5385 mutex_lock(&dev_priv->rps.hw_lock);
5386 if (dev_priv->rps.enabled) {
5387 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5388 vlv_set_rps_idle(dev_priv);
5389 else
5390 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5391 dev_priv->rps.last_adj = 0;
5392 I915_WRITE(GEN6_PMINTRMSK,
5393 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
5395 mutex_unlock(&dev_priv->rps.hw_lock);
5397 spin_lock(&dev_priv->rps.client_lock);
5398 while (!list_empty(&dev_priv->rps.clients))
5399 list_del_init(dev_priv->rps.clients.next);
5400 spin_unlock(&dev_priv->rps.client_lock);
5403 void gen6_rps_boost(struct drm_i915_private *dev_priv,
5404 struct intel_rps_client *rps,
5405 unsigned long submitted)
5407 /* This is intentionally racy! We peek at the state here, then
5408 * validate inside the RPS worker.
5410 if (!(dev_priv->gt.awake &&
5411 dev_priv->rps.enabled &&
5412 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
5413 return;
5415 /* Force a RPS boost (and don't count it against the client) if
5416 * the GPU is severely congested.
5418 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
5419 rps = NULL;
5421 spin_lock(&dev_priv->rps.client_lock);
5422 if (rps == NULL || list_empty(&rps->link)) {
5423 spin_lock_irq(&dev_priv->irq_lock);
5424 if (dev_priv->rps.interrupts_enabled) {
5425 dev_priv->rps.client_boost = true;
5426 schedule_work(&dev_priv->rps.work);
5428 spin_unlock_irq(&dev_priv->irq_lock);
5430 if (rps != NULL) {
5431 list_add(&rps->link, &dev_priv->rps.clients);
5432 rps->boosts++;
5433 } else
5434 dev_priv->rps.boosts++;
5436 spin_unlock(&dev_priv->rps.client_lock);
5439 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
5441 int err;
5443 lockdep_assert_held(&dev_priv->rps.hw_lock);
5444 GEM_BUG_ON(val > dev_priv->rps.max_freq);
5445 GEM_BUG_ON(val < dev_priv->rps.min_freq);
5447 if (!dev_priv->rps.enabled) {
5448 dev_priv->rps.cur_freq = val;
5449 return 0;
5452 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5453 err = valleyview_set_rps(dev_priv, val);
5454 else
5455 err = gen6_set_rps(dev_priv, val);
5457 return err;
5460 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
5462 I915_WRITE(GEN6_RC_CONTROL, 0);
5463 I915_WRITE(GEN9_PG_ENABLE, 0);
5466 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
5468 I915_WRITE(GEN6_RP_CONTROL, 0);
5471 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
5473 I915_WRITE(GEN6_RC_CONTROL, 0);
5474 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
5475 I915_WRITE(GEN6_RP_CONTROL, 0);
5478 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
5480 I915_WRITE(GEN6_RC_CONTROL, 0);
5483 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
5485 /* We're doing forcewake before disabling RC6;
5486 * this is what the BIOS expects when going into suspend */
5487 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5489 I915_WRITE(GEN6_RC_CONTROL, 0);
5491 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5494 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
5496 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5497 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
5498 mode = GEN6_RC_CTL_RC6_ENABLE;
5499 else
5500 mode = 0;
5502 if (HAS_RC6p(dev_priv))
5503 DRM_DEBUG_DRIVER("Enabling RC6 states: "
5504 "RC6 %s RC6p %s RC6pp %s\n",
5505 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
5506 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
5507 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
5510 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
5511 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
5514 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
5516 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5517 bool enable_rc6 = true;
5518 unsigned long rc6_ctx_base;
5519 u32 rc_ctl;
5520 int rc_sw_target;
5522 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5523 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5524 RC_SW_TARGET_STATE_SHIFT;
5525 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5526 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5527 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5528 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5529 rc_sw_target);
5531 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
5532 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
5537 * The exact context size is not known for BXT, so assume a page size
5538 * for this check.
5540 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
5541 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5542 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5543 ggtt->stolen_reserved_size))) {
5544 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
5548 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5549 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5550 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5551 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
5552 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5556 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5557 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5558 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5559 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5563 if (!I915_READ(GEN6_GFXPAUSE)) {
5564 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5568 if (!I915_READ(GEN8_MISC_CTRL0)) {
5569 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
5576 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5578 /* No RC6 before Ironlake and code is gone for ilk. */
5579 if (INTEL_INFO(dev_priv)->gen < 6)
5580 return 0;
5582 if (!enable_rc6)
5583 return 0;
5585 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
5586 DRM_INFO("RC6 disabled by BIOS\n");
5590 /* Respect the kernel parameter if it is set */
5591 if (enable_rc6 >= 0) {
5592 int mask;
5594 if (HAS_RC6p(dev_priv))
5595 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5596 INTEL_RC6pp_ENABLE;
5597 else
5598 mask = INTEL_RC6_ENABLE;
5600 if ((enable_rc6 & mask) != enable_rc6)
5601 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5602 "(requested %d, valid %d)\n",
5603 enable_rc6 & mask, enable_rc6, mask);
5605 return enable_rc6 & mask;
5608 if (IS_IVYBRIDGE(dev_priv))
5609 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
5611 return INTEL_RC6_ENABLE;
5614 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
5616 /* All of these values are in units of 50MHz */
5618 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
5619 if (IS_GEN9_LP(dev_priv)) {
5620 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
5621 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5622 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5623 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
5625 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
5626 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
5627 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5628 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
5630 /* hw_max = RP0 until we check for overclocking */
5631 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
5633 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
5634 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5635 IS_GEN9_BC(dev_priv)) {
5636 u32 ddcc_status = 0;
5638 if (sandybridge_pcode_read(dev_priv,
5639 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5640 &ddcc_status) == 0)
5641 dev_priv->rps.efficient_freq =
5642 clamp_t(u8,
5643 ((ddcc_status >> 8) & 0xff),
5644 dev_priv->rps.min_freq,
5645 dev_priv->rps.max_freq);
5648 if (IS_GEN9_BC(dev_priv)) {
5649 /* Store the frequency values in 16.66 MHZ units, which is
5650 * the natural hardware unit for SKL
5652 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5653 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5654 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5655 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5656 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
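/*
 * Illustrative worked example, not from the code above: with
 * GEN9_FREQ_SCALER == 3 the RPS values move from 50MHz to ~16.66MHz
 * units, so a hypothetical fused RP0 of 36 (36 * 50MHz = 1800MHz) is
 * stored as 36 * 3 = 108, and intel_gpu_freq() divides the scaler
 * back out when converting to MHz.
 */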
5660 static void reset_rps(struct drm_i915_private *dev_priv,
5661 int (*set)(struct drm_i915_private *, u8))
5663 u8 freq = dev_priv->rps.cur_freq;
5665 /* force a reset */
5666 dev_priv->rps.power = -1;
5667 dev_priv->rps.cur_freq = -1;
5669 if (set(dev_priv, freq))
5670 DRM_ERROR("Failed to reset RPS to initial values\n");
5673 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5674 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5676 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5678 /* Program defaults and thresholds for RPS*/
5679 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5680 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5682 /* 1 second timeout*/
5683 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5684 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5686 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5688 /* Leaning on the below call to gen6_set_rps to program/setup the
5689 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5690 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5691 reset_rps(dev_priv, gen6_set_rps);
5693 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5696 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5698 struct intel_engine_cs *engine;
5699 enum intel_engine_id id;
5700 uint32_t rc6_mask = 0;
5702 /* 1a: Software RC state - RC0 */
5703 I915_WRITE(GEN6_RC_STATE, 0);
5705 /* 1b: Get forcewake during program sequence. Although the driver
5706 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5707 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5709 /* 2a: Disable RC states. */
5710 I915_WRITE(GEN6_RC_CONTROL, 0);
5712 /* 2b: Program RC6 thresholds.*/
5714 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5715 if (IS_SKYLAKE(dev_priv))
5716 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5718 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5719 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5720 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5721 for_each_engine(engine, dev_priv, id)
5722 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5724 if (HAS_GUC(dev_priv))
5725 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5727 I915_WRITE(GEN6_RC_SLEEP, 0);
5729 /* 2c: Program Coarse Power Gating Policies. */
5730 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5731 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5733 /* 3a: Enable RC6 */
5734 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5735 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5736 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5737 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5738 I915_WRITE(GEN6_RC_CONTROL,
5739 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
5742 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5743 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5745 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5746 I915_WRITE(GEN9_PG_ENABLE, 0);
5747 else
5748 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5749 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5751 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5754 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5756 struct intel_engine_cs *engine;
5757 enum intel_engine_id id;
5758 uint32_t rc6_mask = 0;
5760 /* 1a: Software RC state - RC0 */
5761 I915_WRITE(GEN6_RC_STATE, 0);
5763 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5764 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5765 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5767 /* 2a: Disable RC states. */
5768 I915_WRITE(GEN6_RC_CONTROL, 0);
5770 /* 2b: Program RC6 thresholds.*/
5771 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5772 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5773 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5774 for_each_engine(engine, dev_priv, id)
5775 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5776 I915_WRITE(GEN6_RC_SLEEP, 0);
5777 if (IS_BROADWELL(dev_priv))
5778 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5779 else
5780 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5782 /* 3: Enable RC6 */
5783 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5784 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5785 intel_print_rc6_info(dev_priv, rc6_mask);
5786 if (IS_BROADWELL(dev_priv))
5787 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5788 GEN7_RC_CTL_TO_MODE |
5791 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5792 GEN6_RC_CTL_EI_MODE(1) |
5795 /* 4 Program defaults and thresholds for RPS*/
5796 I915_WRITE(GEN6_RPNSWREQ,
5797 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5798 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5799 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5800 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5801 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5803 /* Docs recommend 900MHz, and 300 MHz respectively */
5804 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5805 dev_priv->rps.max_freq_softlimit << 24 |
5806 dev_priv->rps.min_freq_softlimit << 16);
5808 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5809 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5810 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5811 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5813 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5815 /* 5: Enable RPS */
5816 I915_WRITE(GEN6_RP_CONTROL,
5817 GEN6_RP_MEDIA_TURBO |
5818 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5819 GEN6_RP_MEDIA_IS_GFX |
5820 GEN6_RP_ENABLE |
5821 GEN6_RP_UP_BUSY_AVG |
5822 GEN6_RP_DOWN_IDLE_AVG);
5824 /* 6: Ring frequency + overclocking (our driver does this later) */
5826 reset_rps(dev_priv, gen6_set_rps);
5828 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5831 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5833 struct intel_engine_cs *engine;
5834 enum intel_engine_id id;
5835 u32 rc6vids, rc6_mask = 0;
5836 u32 gtfifodbg;
5837 int rc6_mode;
5838 int ret;
5840 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5842 /* Here begins a magic sequence of register writes to enable
5843 * auto-downclocking.
5845 * Perhaps there might be some value in exposing these to
5846 * userspace...
5848 I915_WRITE(GEN6_RC_STATE, 0);
5850 /* Clear the DBG now so we don't confuse earlier errors */
5851 gtfifodbg = I915_READ(GTFIFODBG);
5852 if (gtfifodbg) {
5853 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5854 I915_WRITE(GTFIFODBG, gtfifodbg);
5857 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5859 /* disable the counters and set deterministic thresholds */
5860 I915_WRITE(GEN6_RC_CONTROL, 0);
5862 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5863 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5864 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5865 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5866 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5868 for_each_engine(engine, dev_priv, id)
5869 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5871 I915_WRITE(GEN6_RC_SLEEP, 0);
5872 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5873 if (IS_IVYBRIDGE(dev_priv))
5874 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5875 else
5876 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5877 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5878 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5880 /* Check if we are enabling RC6 */
5881 rc6_mode = intel_enable_rc6();
5882 if (rc6_mode & INTEL_RC6_ENABLE)
5883 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5885 /* We don't use those on Haswell */
5886 if (!IS_HASWELL(dev_priv)) {
5887 if (rc6_mode & INTEL_RC6p_ENABLE)
5888 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5890 if (rc6_mode & INTEL_RC6pp_ENABLE)
5891 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5894 intel_print_rc6_info(dev_priv, rc6_mask);
5896 I915_WRITE(GEN6_RC_CONTROL,
5897 rc6_mask |
5898 GEN6_RC_CTL_EI_MODE(1) |
5899 GEN6_RC_CTL_HW_ENABLE);
5901 /* Power down if completely idle for over 50ms */
5902 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5903 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5905 reset_rps(dev_priv, gen6_set_rps);
5907 rc6vids = 0;
5908 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5909 if (IS_GEN6(dev_priv) && ret) {
5910 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5911 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5912 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5913 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5914 rc6vids &= 0xffff00;
5915 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5916 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5918 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5921 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5924 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5926 int min_freq = 15;
5927 unsigned int gpu_freq;
5928 unsigned int max_ia_freq, min_ring_freq;
5929 unsigned int max_gpu_freq, min_gpu_freq;
5930 int scaling_factor = 180;
5931 struct cpufreq_policy *policy;
5933 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5935 policy = cpufreq_cpu_get(0);
5936 if (policy) {
5937 max_ia_freq = policy->cpuinfo.max_freq;
5938 cpufreq_cpu_put(policy);
5939 } else {
5941 * Default to measured freq if none found, PCU will ensure we
5942 * don't go over
5944 max_ia_freq = tsc_khz;
5947 /* Convert from kHz to MHz */
5948 max_ia_freq /= 1000;
5950 min_ring_freq = I915_READ(DCLK) & 0xf;
5951 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5952 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
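	/*
	 * Illustrative worked example, not part of the driver: the DCLK
	 * field counts in 266.6MHz units while the ratios below are kept
	 * in 100MHz units, and 266.6/100 == 8/3. A hypothetical DCLK
	 * field of 6 (~1600MHz DDR) thus becomes 6 * 8 / 3 = 16, i.e. a
	 * 1600MHz floor for the ring frequency.
	 */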
5954 if (IS_GEN9_BC(dev_priv)) {
5955 /* Convert GT frequency to 50 MHz units */
5956 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5957 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5958 } else {
5959 min_gpu_freq = dev_priv->rps.min_freq;
5960 max_gpu_freq = dev_priv->rps.max_freq;
5964 * For each potential GPU frequency, load a ring frequency we'd like
5965 * to use for memory access. We do this by specifying the IA frequency
5966 * the PCU should use as a reference to determine the ring frequency.
5968 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5969 int diff = max_gpu_freq - gpu_freq;
5970 unsigned int ia_freq = 0, ring_freq = 0;
5972 if (IS_GEN9_BC(dev_priv)) {
5974 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5975 * No floor required for ring frequency on SKL.
5977 ring_freq = gpu_freq;
5978 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5979 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5980 ring_freq = max(min_ring_freq, gpu_freq);
5981 } else if (IS_HASWELL(dev_priv)) {
5982 ring_freq = mult_frac(gpu_freq, 5, 4);
5983 ring_freq = max(min_ring_freq, ring_freq);
5984 /* leave ia_freq as the default, chosen by cpufreq */
5986 /* On older processors, there is no separate ring
5987 * clock domain, so in order to boost the bandwidth
5988 * of the ring, we need to upclock the CPU (ia_freq).
5990 * For GPU frequencies less than 750MHz,
5991 * just use the lowest ring freq.
5993 if (gpu_freq < min_freq)
5994 ia_freq = 800;
5995 else
5996 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5997 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
6000 sandybridge_pcode_write(dev_priv,
6001 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
6002 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
6003 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
6004 gpu_freq);
6008 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
6010 u32 val, rp0;
6012 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
6014 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
6015 case 8:
6016 /* (2 * 4) config */
6017 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
6018 break;
6019 case 12:
6020 /* (2 * 6) config */
6021 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
6022 break;
6023 case 16:
6024 /* (2 * 8) config */
6025 default:
6026 /* Setting (2 * 8) Min RP0 for any other combination */
6027 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
6028 break;
6029 }
6031 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
6033 return rp0;
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpe;

val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

return rpe;
}
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;

val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

return rp1;
}
static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpn;

val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
       FB_GFX_FREQ_FUSE_MASK);

return rpn;
}
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;

val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

return rp1;
}
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;

val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
rp0 = min_t(u32, rp0, 0xea);

return rp0;
}
static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpe;

val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

return rpe;
}
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
u32 val;

val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
* a BYT-M B0 the above register contains 0xbf. Moreover when setting
* a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
* to make sure it matches what Punit accepts.
*/
return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
6119 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
6121 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6123 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
6124 dev_priv->vlv_pctx->stolen->start);
6128 /* Check that the pcbr address is not empty. */
6129 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
6131 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
6133 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
6136 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
6138 struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
u32 pcbr;
int pctx_size = 32*1024;
6143 pcbr = I915_READ(VLV_PCBR);
6144 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
6145 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
6146 paddr = (dev_priv->mm.stolen_base +
6147 (ggtt->stolen_size - pctx_size));
6149 pctx_paddr = (paddr & (~4095));
6150 I915_WRITE(VLV_PCBR, pctx_paddr);
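/* For example, with 32MB of stolen memory this places the 32KB power
 * context in the top 32KB of stolen, with its base 4KB-aligned by the
 * (paddr & ~4095) above. */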
6153 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
6156 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
6158 struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;

pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
int pcbr_offset;

pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
						      pcbr_offset,
						      I915_GTT_OFFSET_NONE,
						      pctx_size);
goto out;
}

DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
6180 * The Gfx driver is expected to program this register and ensure
6181 * proper allocation within Gfx stolen memory. For example, this
6182 * register should be programmed such than the PCBR range does not
6183 * overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
}
6192 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
6193 I915_WRITE(VLV_PCBR, pctx_paddr);
6196 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
6197 dev_priv->vlv_pctx = pctx;
6200 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;

i915_gem_object_put(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
6209 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
6211 dev_priv->rps.gpll_ref_freq =
6212 vlv_get_cck_clock(dev_priv, "GPLL ref",
6213 CCK_GPLL_CLOCK_CONTROL,
6214 dev_priv->czclk_freq);
6216 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
6217 dev_priv->rps.gpll_ref_freq);
6220 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
6224 valleyview_setup_pctx(dev_priv);
6226 vlv_init_gpll_ref_freq(dev_priv);
6228 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
6243 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
6244 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
6245 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
6246 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
6247 dev_priv->rps.max_freq);
6249 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
6250 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
6251 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
6252 dev_priv->rps.efficient_freq);
6254 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
6255 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
6256 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
6257 dev_priv->rps.rp1_freq);
6259 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
6260 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
6261 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
6262 dev_priv->rps.min_freq);
6265 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
6269 cherryview_setup_pctx(dev_priv);
6271 vlv_init_gpll_ref_freq(dev_priv);
6273 mutex_lock(&dev_priv->sb_lock);
6274 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
6275 mutex_unlock(&dev_priv->sb_lock);
switch ((val >> 2) & 0x7) {
case 3:
dev_priv->mem_freq = 2000;
break;
default:
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
6287 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
6288 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
6289 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
6290 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
6291 dev_priv->rps.max_freq);
6293 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
6294 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
6295 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
6296 dev_priv->rps.efficient_freq);
6298 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
6299 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
6300 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
6301 dev_priv->rps.rp1_freq);
6303 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
6304 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
6305 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
6306 dev_priv->rps.min_freq);
6308 WARN_ONCE((dev_priv->rps.max_freq |
6309 dev_priv->rps.efficient_freq |
6310 dev_priv->rps.rp1_freq |
6311 dev_priv->rps.min_freq) & 1,
6312 "Odd GPU freq values\n");
6315 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6317 valleyview_cleanup_pctx(dev_priv);
6320 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
6322 struct intel_engine_cs *engine;
6323 enum intel_engine_id id;
6324 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
6326 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
				     GT_FIFO_FREE_ENTRIES_CHV);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
		 gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
6336 cherryview_check_pctx(dev_priv);
6338 /* 1a & 1b: Get forcewake during program sequence. Although the driver
6339 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6340 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6342 /* Disable RC states. */
6343 I915_WRITE(GEN6_RC_CONTROL, 0);
6345 /* 2a: Program RC6 thresholds.*/
6346 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6347 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6348 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6350 for_each_engine(engine, dev_priv, id)
6351 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6352 I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us (0x186 = 390; 390 * 1.28 us = 499.2 us) */
6355 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
6357 /* allows RC6 residency counter to work */
6358 I915_WRITE(VLV_COUNTER_CONTROL,
6359 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
6360 VLV_MEDIA_RC6_COUNT_EN |
6361 VLV_RENDER_RC6_COUNT_EN));
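/* VLV_COUNTER_CONTROL is a masked register: _MASKED_BIT_ENABLE() also
 * sets each bit's write-enable in the upper 16 bits, so only the named
 * bits change and the rest of the register is left untouched. */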
6363 /* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);

/* 3: Enable RC6 */
if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
    (pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
6371 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
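/* GEN7_RC_CTL_TO_MODE selects timeout-based RC6 promotion, i.e. entry
 * is governed by the GEN6_RC6_THRESHOLD programmed above rather than by
 * evaluation intervals. */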
6373 /* 4 Program defaults and thresholds for RPS*/
6374 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6375 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6376 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6377 I915_WRITE(GEN6_RP_UP_EI, 66000);
6378 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6380 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6383 I915_WRITE(GEN6_RP_CONTROL,
6384 GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
6388 GEN6_RP_DOWN_IDLE_AVG);
6390 /* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
      VLV_SOC_TDP_EN |
      CHV_BIAS_CPU_50_SOC_50;
6394 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
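/* Note the even 50/50 CPU/SoC bias programmed here, versus the
 * 12.5/87.5 split valleyview_enable_rps() uses below. */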
6396 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6398 /* RPS code assumes GPLL is used */
6399 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6401 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6402 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6404 reset_rps(dev_priv, valleyview_set_rps);
6406 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6409 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
6411 struct intel_engine_cs *engine;
6412 enum intel_engine_id id;
6413 u32 gtfifodbg, val, rc6_mode = 0;
6415 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6417 valleyview_check_pctx(dev_priv);
gtfifodbg = I915_READ(GTFIFODBG);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
		 gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
6426 /* If VLV, Forcewake all wells, else re-direct to regular path */
6427 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6429 /* Disable RC states. */
6430 I915_WRITE(GEN6_RC_CONTROL, 0);
6432 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6433 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6434 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6435 I915_WRITE(GEN6_RP_UP_EI, 66000);
6436 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6438 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6440 I915_WRITE(GEN6_RP_CONTROL,
6441 GEN6_RP_MEDIA_TURBO |
6442 GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
6446 GEN6_RP_DOWN_IDLE_CONT);
6448 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6449 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6450 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6452 for_each_engine(engine, dev_priv, id)
6453 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6455 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
6457 /* allows RC6 residency counter to work */
6458 I915_WRITE(VLV_COUNTER_CONTROL,
6459 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
6460 VLV_MEDIA_RC0_COUNT_EN |
6461 VLV_RENDER_RC0_COUNT_EN |
6462 VLV_MEDIA_RC6_COUNT_EN |
6463 VLV_RENDER_RC6_COUNT_EN));
6465 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6466 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
6468 intel_print_rc6_info(dev_priv, rc6_mode);
6470 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6472 /* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
      VLV_SOC_TDP_EN |
      VLV_BIAS_CPU_125_SOC_875;
6476 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6478 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6480 /* RPS code assumes GPLL is used */
6481 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6483 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6484 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6486 reset_rps(dev_priv, valleyview_set_rps);
6488 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
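/* intel_pxfreq() below decodes a PXVFREQ-style field into a frequency.
 * Worked example (field values assumed for illustration): div = 16,
 * post = 1, pre = 1 gives 16 * 133333 / ((1 << 1) * 1) = 1066664,
 * i.e. ~1.07GHz with the 133333 reference read as kHz. */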
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);

if (!pre)
return 0;

freq = ((div * 133333) / ((1<<post) * pre));

return freq;
}
static const struct cparams {
u16 i;
u16 t;
u16 m;
u16 c;
} cparams[] = {
6512 { 1, 1333, 301, 28664 },
6513 { 1, 1066, 294, 24460 },
6514 { 1, 800, 294, 25192 },
6515 { 0, 1333, 276, 27605 },
6516 { 0, 1066, 276, 27605 },
{ 0, 800, 231, 23784 },
};
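/* Each row is matched against dev_priv->ips.c_m and ips.r_t and supplies
 * the (m, c) line that __i915_chipset_val() below evaluates as roughly
 * (m * count_rate + c) / 10 to estimate chipset power. */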
6520 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
6522 u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
6527 lockdep_assert_held(&mchdev_lock);
6529 diff1 = now - dev_priv->ips.last_time1;
6531 /* Prevent division-by-zero if we are asking too fast.
6532 * Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
if (diff1 <= 10)
return dev_priv->ips.chipset_power;
6539 count1 = I915_READ(DMIEC);
6540 count2 = I915_READ(DDREC);
6541 count3 = I915_READ(CSIEC);
6543 total_count = count1 + count2 + count3;
6545 /* FIXME: handle per-counter overflow */
6546 if (total_count < dev_priv->ips.last_count1) {
6547 diff = ~0UL - dev_priv->ips.last_count1;
6548 diff += total_count;
6550 diff = total_count - dev_priv->ips.last_count1;
6553 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
6554 if (cparams[i].i == dev_priv->ips.c_m &&
    cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
}
}

diff = div_u64(diff, diff1);
6563 ret = ((m * diff) + c);
6564 ret = div_u64(ret, 10);
6566 dev_priv->ips.last_count1 = total_count;
6567 dev_priv->ips.last_time1 = now;
dev_priv->ips.chipset_power = ret;

return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
unsigned long val;

if (INTEL_INFO(dev_priv)->gen != 5)
return 0;

spin_lock_irq(&mchdev_lock);

val = __i915_chipset_val(dev_priv);

spin_unlock_irq(&mchdev_lock);

return val;
}
6590 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
unsigned long m, x, b;
u32 tsfs;
6595 tsfs = I915_READ(TSFS);
6597 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6598 x = I915_READ8(TR1);
6600 b = tsfs & TSFS_INTR_MASK;
return ((m * x) / 127) - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
if (pxvid == 0)
return 0;

if (pxvid >= 8 && pxvid < 31)
pxvid = 31;

return (pxvid + 2) * 125;
}
6616 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6618 const int vd = _pxvid_to_vd(pxvid);
6619 const int vm = vd - 1125;
6621 if (INTEL_INFO(dev_priv)->is_mobile)
return vm > 0 ? vm : 0;

return vd;
}
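/* For example, pxvid = 6 decodes to (6 + 2) * 125 = 1000, while any
 * pxvid in [8, 31) is first clamped to 31 and decodes to 4125; on mobile
 * parts the 1125 baseline is then subtracted and floored at 0. */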
6627 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6629 u64 now, diff, diffms;
6632 lockdep_assert_held(&mchdev_lock);
6634 now = ktime_get_raw_ns();
6635 diffms = now - dev_priv->ips.last_time2;
6636 do_div(diffms, NSEC_PER_MSEC);
/* Don't divide by 0 */
if (!diffms)
return;

count = I915_READ(GFXEC);

if (count < dev_priv->ips.last_count2) {
diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
diff = count - dev_priv->ips.last_count2;
}
6651 dev_priv->ips.last_count2 = count;
6652 dev_priv->ips.last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
dev_priv->ips.gfx_power = diff;
}
6660 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen != 5)
return;
6665 spin_lock_irq(&mchdev_lock);
6667 __i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
}
6672 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
6677 lockdep_assert_held(&mchdev_lock);
6679 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6680 pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);

state1 = ext_v;

t = i915_mch_val(dev_priv);

/* Revel in the empirically derived constants */

/* Correction factor in 1/100000 units */
if (t > 80)
corr = ((t * 2349) + 135940);
else if (t >= 50)
corr = ((t * 964) + 29317);
else /* < 50 */
corr = ((t * 301) + 1004);

corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
corr2 = (corr * dev_priv->ips.corr);
6701 state2 = (corr2 * state1) / 10000;
6702 state2 /= 100; /* convert to mW */
6704 __i915_update_gfx_val(dev_priv);
6706 return dev_priv->ips.gfx_power + state2;
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long val;

if (INTEL_INFO(dev_priv)->gen != 5)
return 0;

spin_lock_irq(&mchdev_lock);

val = __i915_gfx_val(dev_priv);

spin_unlock_irq(&mchdev_lock);

return val;
}
/**
* i915_read_mch_val - return value for IPS use
6728 * Calculate and return a value for the IPS driver to use when deciding whether
* we have thermal and power headroom to increase CPU or GPU power budget.
*/
6731 unsigned long i915_read_mch_val(void)
6733 struct drm_i915_private *dev_priv;
6734 unsigned long chipset_val, graphics_val, ret = 0;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out;
dev_priv = i915_mch_dev;
6741 chipset_val = __i915_chipset_val(dev_priv);
6742 graphics_val = __i915_gfx_val(dev_priv);
6744 ret = chipset_val + graphics_val;
out:
spin_unlock_irq(&mchdev_lock);

return ret;
}
6751 EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
* i915_gpu_raise - raise GPU frequency limit
*
6756 * Raise the limit; IPS indicates we have thermal headroom.
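* On Ironlake a smaller DRPS delay value means a higher frequency, so
* raising the cap steps max_delay down toward fmax (see below).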
*/
bool i915_gpu_raise(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;

spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;

if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
dev_priv->ips.max_delay--;

out_unlock:
spin_unlock_irq(&mchdev_lock);

return ret;
}
6778 EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
* i915_gpu_lower - lower GPU frequency limit
*
6783 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6784 * frequency maximum.
*/
bool i915_gpu_lower(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;

spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;

if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
dev_priv->ips.max_delay++;

out_unlock:
spin_unlock_irq(&mchdev_lock);

return ret;
}
6806 EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
* i915_gpu_busy - indicate GPU business to IPS
*
6811 * Tell the IPS driver whether or not the GPU is busy.
*/
bool i915_gpu_busy(void)
{
bool ret = false;

spin_lock_irq(&mchdev_lock);
if (i915_mch_dev)
ret = i915_mch_dev->gt.awake;
spin_unlock_irq(&mchdev_lock);

return ret;
}
6824 EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
* i915_gpu_turbo_disable - disable graphics turbo
*
6829 * Disable graphics turbo by resetting the max frequency and setting the
6830 * current frequency to the default.
*/
bool i915_gpu_turbo_disable(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;

spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;

dev_priv->ips.max_delay = dev_priv->ips.fstart;

if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;

out_unlock:
spin_unlock_irq(&mchdev_lock);

return ret;
}
6854 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
* Tells the intel_ips driver that the i915 driver is now loaded, if
6858 * IPS got loaded first.
6860 * This awkward dance is so that neither module has to depend on the
6861 * other in order for IPS to do the appropriate communication of
6862 * GPU turbo limits to i915.
*/
static void
ips_ping_for_i915_load(void)
{
void (*link)(void);

link = symbol_get(ips_link_to_i915_driver);
if (link) {
link();
symbol_put(ips_link_to_i915_driver);
}
}
6876 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6878 /* We only register the i915 ips part with intel-ips once everything is
6879 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6880 spin_lock_irq(&mchdev_lock);
6881 i915_mch_dev = dev_priv;
6882 spin_unlock_irq(&mchdev_lock);
6884 ips_ping_for_i915_load();
6887 void intel_gpu_ips_teardown(void)
6889 spin_lock_irq(&mchdev_lock);
6890 i915_mch_dev = NULL;
6891 spin_unlock_irq(&mchdev_lock);
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
u32 lcfuse;
u8 pxw[16];
int i;

/* Disable to program */
I915_WRITE(ECR, 0);
POSTING_READ(ECR);

/* Program energy weights for various events */
6905 I915_WRITE(SDEW, 0x15040d00);
6906 I915_WRITE(CSIEW0, 0x007f0000);
6907 I915_WRITE(CSIEW1, 0x1e220004);
6908 I915_WRITE(CSIEW2, 0x04000004);
6910 for (i = 0; i < 5; i++)
6911 I915_WRITE(PEW(i), 0);
6912 for (i = 0; i < 3; i++)
6913 I915_WRITE(DEW(i), 0);
6915 /* Program P-state weights to account for frequency power adjustment */
6916 for (i = 0; i < 16; i++) {
6917 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6918 unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
	PXVFREQ_PX_SHIFT;
unsigned long val;

val = vid * vid;
val *= (freq / 1000);
val *= 255;
val /= (127*127*900);
if (val > 0xff)
DRM_ERROR("bad pxval: %ld\n", val);
pxw[i] = val;
}
/* Render standby states get 0 weight */
pxw[14] = 0;
pxw[15] = 0;

for (i = 0; i < 4; i++) {
6936 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6937 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6938 I915_WRITE(PXW(i), val);
6941 /* Adjust magic regs to magic values (more experimental results) */
6942 I915_WRITE(OGW0, 0);
6943 I915_WRITE(OGW1, 0);
6944 I915_WRITE(EG0, 0x00007f00);
6945 I915_WRITE(EG1, 0x0000000e);
6946 I915_WRITE(EG2, 0x000e0000);
6947 I915_WRITE(EG3, 0x68000300);
6948 I915_WRITE(EG4, 0x42000000);
I915_WRITE(EG5, 0x00140031);
I915_WRITE(EG6, 0);
I915_WRITE(EG7, 0);

for (i = 0; i < 8; i++)
6954 I915_WRITE(PXWL(i), 0);
6956 /* Enable PMON + select events */
6957 I915_WRITE(ECR, 0x80000019);
6959 lcfuse = I915_READ(LCFUSE02);
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
6964 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
*/
if (!i915.enable_rc6) {
6971 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
6975 mutex_lock(&dev_priv->drm.struct_mutex);
6976 mutex_lock(&dev_priv->rps.hw_lock);
6978 /* Initialize RPS limits (for userspace) */
6979 if (IS_CHERRYVIEW(dev_priv))
6980 cherryview_init_gt_powersave(dev_priv);
6981 else if (IS_VALLEYVIEW(dev_priv))
6982 valleyview_init_gt_powersave(dev_priv);
6983 else if (INTEL_GEN(dev_priv) >= 6)
6984 gen6_init_rps_frequencies(dev_priv);
6986 /* Derive initial user preferences/limits from the hardware limits */
6987 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
6988 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
6990 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
6991 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
6993 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dev_priv->rps.min_freq_softlimit =
	max_t(int,
	      dev_priv->rps.efficient_freq,
	      intel_freq_opcode(dev_priv, 450));
6999 /* After setting max-softlimit, find the overclock max freq */
7000 if (IS_GEN6(dev_priv) ||
    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;

sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
7005 if (params & BIT(31)) { /* OC supported */
7006 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
7007 (dev_priv->rps.max_freq & 0xff) * 50,
7008 (params & 0xff) * 50);
dev_priv->rps.max_freq = params & 0xff;
}
}
7013 /* Finally allow us to boost to max by default */
7014 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
7016 mutex_unlock(&dev_priv->rps.hw_lock);
7017 mutex_unlock(&dev_priv->drm.struct_mutex);
7019 intel_autoenable_gt_powersave(dev_priv);
7022 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
7024 if (IS_VALLEYVIEW(dev_priv))
7025 valleyview_cleanup_gt_powersave(dev_priv);
7027 if (!i915.enable_rc6)
7028 intel_runtime_pm_put(dev_priv);
}

/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
7033 * @dev_priv: i915 device
7035 * We don't want to disable RC6 or other features here, we just want
7036 * to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
7039 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
7044 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
7045 intel_runtime_pm_put(dev_priv);
7047 /* gen6_rps_idle() will be called later to disable interrupts */
7050 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7052 dev_priv->rps.enabled = true; /* force disabling */
7053 intel_disable_gt_powersave(dev_priv);
7055 gen6_reset_rps_interrupts(dev_priv);
7058 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
if (!READ_ONCE(dev_priv->rps.enabled))
return;
7063 mutex_lock(&dev_priv->rps.hw_lock);
7065 if (INTEL_GEN(dev_priv) >= 9) {
7066 gen9_disable_rc6(dev_priv);
7067 gen9_disable_rps(dev_priv);
7068 } else if (IS_CHERRYVIEW(dev_priv)) {
7069 cherryview_disable_rps(dev_priv);
7070 } else if (IS_VALLEYVIEW(dev_priv)) {
7071 valleyview_disable_rps(dev_priv);
7072 } else if (INTEL_GEN(dev_priv) >= 6) {
7073 gen6_disable_rps(dev_priv);
7074 } else if (IS_IRONLAKE_M(dev_priv)) {
7075 ironlake_disable_drps(dev_priv);
7078 dev_priv->rps.enabled = false;
7079 mutex_unlock(&dev_priv->rps.hw_lock);
7082 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
7084 /* We shouldn't be disabling as we submit, so this should be less
* racy than it appears!
*/
if (READ_ONCE(dev_priv->rps.enabled))
return;
7090 /* Powersaving is controlled by the host when inside a VM */
if (intel_vgpu_active(dev_priv))
return;
7094 mutex_lock(&dev_priv->rps.hw_lock);
7096 if (IS_CHERRYVIEW(dev_priv)) {
7097 cherryview_enable_rps(dev_priv);
7098 } else if (IS_VALLEYVIEW(dev_priv)) {
7099 valleyview_enable_rps(dev_priv);
7100 } else if (INTEL_GEN(dev_priv) >= 9) {
7101 gen9_enable_rc6(dev_priv);
7102 gen9_enable_rps(dev_priv);
7103 if (IS_GEN9_BC(dev_priv))
7104 gen6_update_ring_freq(dev_priv);
7105 } else if (IS_BROADWELL(dev_priv)) {
7106 gen8_enable_rps(dev_priv);
7107 gen6_update_ring_freq(dev_priv);
7108 } else if (INTEL_GEN(dev_priv) >= 6) {
7109 gen6_enable_rps(dev_priv);
7110 gen6_update_ring_freq(dev_priv);
7111 } else if (IS_IRONLAKE_M(dev_priv)) {
7112 ironlake_enable_drps(dev_priv);
7113 intel_init_emon(dev_priv);
7116 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
7117 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
7119 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
7120 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
7122 dev_priv->rps.enabled = true;
7123 mutex_unlock(&dev_priv->rps.hw_lock);
7126 static void __intel_autoenable_gt_powersave(struct work_struct *work)
7128 struct drm_i915_private *dev_priv =
7129 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
7130 struct intel_engine_cs *rcs;
7131 struct drm_i915_gem_request *req;
if (READ_ONCE(dev_priv->rps.enabled))
goto out;

rcs = dev_priv->engine[RCS];
if (rcs->last_retired_context)
goto out;

if (!rcs->init_context)
goto out;

mutex_lock(&dev_priv->drm.struct_mutex);

req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
if (IS_ERR(req))
goto unlock;
7149 if (!i915.enable_execlists && i915_switch_context(req) == 0)
7150 rcs->init_context(req);
7152 /* Mark the device busy, calling intel_enable_gt_powersave() */
i915_add_request(req);

unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);

out:
intel_runtime_pm_put(dev_priv);
}
7161 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
if (READ_ONCE(dev_priv->rps.enabled))
return;
7166 if (IS_IRONLAKE_M(dev_priv)) {
7167 ironlake_enable_drps(dev_priv);
7168 intel_init_emon(dev_priv);
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
/*
7171 * PCU communication is slow and this doesn't need to be
7172 * done at any specific time, so do this out of our fast path
7173 * to make resume and init faster.
7175 * We depend on the HW RC6 power context save/restore
7176 * mechanism when entering D3 through runtime PM suspend. So
7177 * disable RPM until RPS/RC6 is properly setup. We can only
7178 * get here via the driver load/system resume/runtime resume
7179 * paths, so the _noresume version is enough (and in case of
* runtime resume it's necessary).
*/
7182 if (queue_delayed_work(dev_priv->wq,
7183 &dev_priv->rps.autoenable_work,
7184 round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
}
7189 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
* On Ibex Peak and Cougar Point, we need to disable clock
7193 * gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
7196 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
7203 for_each_pipe(dev_priv, pipe) {
7204 I915_WRITE(DSPCNTR(pipe),
7205 I915_READ(DSPCNTR(pipe)) |
7206 DISPPLANE_TRICKLE_FEED_DISABLE);
7208 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
7209 POSTING_READ(DSPSURF(pipe));
7213 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
7215 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
7216 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
7217 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
/*
* Don't touch WM1S_LP_EN here.
* Doing so could cause underruns.
*/
}
7225 static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
7227 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
* Required for FBC
* WaFbcDisableDpfcClockGating:ilk
*/
7233 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
7234 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
7235 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
7237 I915_WRITE(PCH_3DCGDIS0,
7238 MARIUNIT_CLOCK_GATE_DISABLE |
7239 SVSMUNIT_CLOCK_GATE_DISABLE);
7240 I915_WRITE(PCH_3DCGDIS1,
7241 VFMUNIT_CLOCK_GATE_DISABLE);
/*
* According to the spec the following bits should be set in
7245 * order to enable memory self-refresh
7246 * The bit 22/21 of 0x42004
7247 * The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
7250 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7251 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7252 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7253 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
7254 I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
 DISP_FBCF_WM_DIS));
7258 ilk_init_lp_watermarks(dev_priv);
/*
* Based on the document from hardware guys the following bits
7262 * should be set unconditionally in order to enable FBC.
7263 * The bit 22 of 0x42000
7264 * The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
7267 if (IS_IRONLAKE_M(dev_priv)) {
7268 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
7269 I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
7272 I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
}
7277 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7279 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7280 I915_READ(ILK_DISPLAY_CHICKEN2) |
7281 ILK_ELPIN_409_SELECT);
7282 I915_WRITE(_3D_CHICKEN2,
7283 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
7284 _3D_CHICKEN2_WM_READ_PIPELINED);
7286 /* WaDisableRenderCachePipelinedFlush:ilk */
7287 I915_WRITE(CACHE_MODE_0,
7288 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7290 /* WaDisable_RenderCache_OperationalFlush:ilk */
7291 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7293 g4x_disable_trickle_feed(dev_priv);
7295 ibx_init_clock_gating(dev_priv);
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
int pipe;
uint32_t val;

/*
7304 * On Ibex Peak and Cougar Point, we need to disable clock
7305 * gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
7308 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
7309 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
7310 PCH_CPUNIT_CLOCK_GATE_DISABLE);
7311 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7312 DPLS_EDP_PPS_FIX_DIS);
7313 /* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
7316 for_each_pipe(dev_priv, pipe) {
7317 val = I915_READ(TRANS_CHICKEN2(pipe));
7318 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
7319 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7320 if (dev_priv->vbt.fdi_rx_polarity_inverted)
7321 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
7322 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
7323 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
7324 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
7325 I915_WRITE(TRANS_CHICKEN2(pipe), val);
7327 /* WADP0ClockGatingDisable */
7328 for_each_pipe(dev_priv, pipe) {
7329 I915_WRITE(TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
uint32_t tmp;

tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
	      tmp);
}
7344 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
7346 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7348 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7350 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7351 I915_READ(ILK_DISPLAY_CHICKEN2) |
7352 ILK_ELPIN_409_SELECT);
7354 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
7355 I915_WRITE(_3D_CHICKEN,
7356 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
7358 /* WaDisable_RenderCache_OperationalFlush:snb */
7359 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
7365 * Note that PS/WM thread counts depend on the WIZ hashing
7366 * disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
7369 I915_WRITE(GEN6_GT_MODE,
7370 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7372 ilk_init_lp_watermarks(dev_priv);
7374 I915_WRITE(CACHE_MODE_0,
7375 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
7377 I915_WRITE(GEN6_UCGCTL1,
7378 I915_READ(GEN6_UCGCTL1) |
7379 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7380 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7382 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7383 * gating disable must be set. Failure to set it results in
7384 * flickering pixels due to Z write ordering failures after
7385 * some amount of runtime in the Mesa "fire" demo, and Unigine
7386 * Sanctuary and Tropics, and apparently anything else with
7387 * alpha test or pixel discard.
7389 * According to the spec, bit 11 (RCCUNIT) must also be set,
7390 * but we didn't debug actual testcases to find it out.
7392 * WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
7395 I915_WRITE(GEN6_UCGCTL2,
7396 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7397 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7399 /* WaStripsFansDisableFastClipPerformanceFix:snb */
7400 I915_WRITE(_3D_CHICKEN3,
7401 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
7405 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
7406 * 3DSTATE_SF number of SF output attributes is more than 16."
7408 I915_WRITE(_3D_CHICKEN3,
7409 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
/*
* According to the spec the following bits should be
7413 * set in order to enable memory self-refresh and fbc:
7414 * The bit21 and bit22 of 0x42000
7415 * The bit21 and bit22 of 0x42004
7416 * The bit5 and bit7 of 0x42020
7417 * The bit14 of 0x70180
7418 * The bit14 of 0x71180
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
7422 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7423 I915_READ(ILK_DISPLAY_CHICKEN1) |
7424 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7425 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7426 I915_READ(ILK_DISPLAY_CHICKEN2) |
7427 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7428 I915_WRITE(ILK_DSPCLK_GATE_D,
7429 I915_READ(ILK_DSPCLK_GATE_D) |
7430 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7431 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7433 g4x_disable_trickle_feed(dev_priv);
7435 cpt_init_clock_gating(dev_priv);
7437 gen6_check_mch_setup(dev_priv);
7440 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
7442 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
/*
* WaVSThreadDispatchOverride:ivb,vlv
7447 * This actually overrides the dispatch
* mode for all thread types.
*/
7450 reg &= ~GEN7_FF_SCHED_MASK;
7451 reg |= GEN7_FF_TS_SCHED_HW;
7452 reg |= GEN7_FF_VS_SCHED_HW;
7453 reg |= GEN7_FF_DS_SCHED_HW;
7455 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
7461 * TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
7464 if (HAS_PCH_LPT_LP(dev_priv))
7465 I915_WRITE(SOUTH_DSPCLK_GATE_D,
7466 I915_READ(SOUTH_DSPCLK_GATE_D) |
7467 PCH_LP_PARTITION_LEVEL_DISABLE);
7469 /* WADPOClockGatingDisable:hsw */
7470 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7471 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7472 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7475 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7477 if (HAS_PCH_LPT_LP(dev_priv)) {
7478 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
7480 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7481 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7485 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7486 int general_prio_credits,
				   int high_prio_credits)
{
u32 misccpctl;
7491 /* WaTempDisableDOPClkGating:bdw */
7492 misccpctl = I915_READ(GEN7_MISCCPCTL);
7493 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7495 I915_WRITE(GEN8_L3SQCREG1,
7496 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7497 L3_HIGH_PRIO_CREDITS(high_prio_credits));
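/* The call sites below pass the BSpec-recommended splits: 30/2 for bdw
 * (WaProgramL3SqcReg1Default) and 38/2 for chv. */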
/*
* Wait at least 100 clocks before re-enabling clock gating.
* See the definition of L3SQCREG1 in BSpec.
*/
7503 POSTING_READ(GEN8_L3SQCREG1);
7505 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7508 static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
7510 gen9_init_clock_gating(dev_priv);
7512 /* WaDisableSDEUnitClockGating:kbl */
7513 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7514 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7515 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7517 /* WaDisableGamClockGating:kbl */
7518 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7519 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7520 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7522 /* WaFbcNukeOnHostModify:kbl */
7523 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7524 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7527 static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
7529 gen9_init_clock_gating(dev_priv);
7531 /* WAC6entrylatency:skl */
7532 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7533 FBC_LLC_FULLY_OPEN);
7535 /* WaFbcNukeOnHostModify:skl */
7536 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7537 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
7544 ilk_init_lp_watermarks(dev_priv);
7546 /* WaSwitchSolVfFArbitrationPriority:bdw */
7547 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7549 /* WaPsrDPAMaskVBlankInSRD:bdw */
7550 I915_WRITE(CHICKEN_PAR1_1,
7551 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7553 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7554 for_each_pipe(dev_priv, pipe) {
7555 I915_WRITE(CHICKEN_PIPESL_1(pipe),
7556 I915_READ(CHICKEN_PIPESL_1(pipe)) |
7557 BDW_DPRS_MASK_VBLANK_SRD);
7560 /* WaVSRefCountFullforceMissDisable:bdw */
7561 /* WaDSRefCountFullforceMissDisable:bdw */
7562 I915_WRITE(GEN7_FF_THREAD_MODE,
7563 I915_READ(GEN7_FF_THREAD_MODE) &
7564 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7566 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7567 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7569 /* WaDisableSDEUnitClockGating:bdw */
7570 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7571 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7573 /* WaProgramL3SqcReg1Default:bdw */
7574 gen8_set_l3sqc_credits(dev_priv, 30, 2);
/*
* WaGttCachingOffByDefault:bdw
7578 * GTT cache may not work with big pages, so if those
* are ever enabled GTT cache may need to be disabled.
*/
7581 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7583 /* WaKVMNotificationOnConfigChange:bdw */
7584 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7585 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7587 lpt_init_clock_gating(dev_priv);
7589 /* WaDisableDopClockGating:bdw
* Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
* clock gating.
*/
7594 I915_WRITE(GEN6_UCGCTL1,
7595 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
7598 static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
7600 ilk_init_lp_watermarks(dev_priv);
7602 /* L3 caching of data atomics doesn't work -- disable it. */
7603 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
7604 I915_WRITE(HSW_ROW_CHICKEN3,
7605 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
7607 /* This is required by WaCatErrorRejectionIssue:hsw */
7608 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7609 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7610 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7612 /* WaVSRefCountFullforceMissDisable:hsw */
7613 I915_WRITE(GEN7_FF_THREAD_MODE,
7614 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7616 /* WaDisable_RenderCache_OperationalFlush:hsw */
7617 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7619 /* enable HiZ Raw Stall Optimization */
7620 I915_WRITE(CACHE_MODE_0_GEN7,
7621 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7623 /* WaDisable4x2SubspanOptimization:hsw */
7624 I915_WRITE(CACHE_MODE_1,
7625 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
* BSpec recommends 8x4 when MSAA is used,
7629 * however in practice 16x4 seems fastest.
7631 * Note that PS/WM thread counts depend on the WIZ hashing
7632 * disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
7635 I915_WRITE(GEN7_GT_MODE,
7636 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7638 /* WaSampleCChickenBitEnable:hsw */
7639 I915_WRITE(HALF_SLICE_CHICKEN3,
7640 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7642 /* WaSwitchSolVfFArbitrationPriority:hsw */
7643 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7645 /* WaRsPkgCStateDisplayPMReq:hsw */
7646 I915_WRITE(CHICKEN_PAR1_1,
7647 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7649 lpt_init_clock_gating(dev_priv);
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t snpcr;
7656 ilk_init_lp_watermarks(dev_priv);
7658 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7660 /* WaDisableEarlyCull:ivb */
7661 I915_WRITE(_3D_CHICKEN3,
7662 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7664 /* WaDisableBackToBackFlipFix:ivb */
7665 I915_WRITE(IVB_CHICKEN3,
7666 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7667 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7669 /* WaDisablePSDDualDispatchEnable:ivb */
7670 if (IS_IVB_GT1(dev_priv))
7671 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7672 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7674 /* WaDisable_RenderCache_OperationalFlush:ivb */
7675 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7677 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7678 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7679 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7681 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7682 I915_WRITE(GEN7_L3CNTLREG1,
7683 GEN7_WA_FOR_GEN7_L3_CONTROL);
7684 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7685 GEN7_WA_L3_CHICKEN_MODE);
7686 if (IS_IVB_GT1(dev_priv))
7687 I915_WRITE(GEN7_ROW_CHICKEN2,
7688 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
7691 I915_WRITE(GEN7_ROW_CHICKEN2,
7692 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7693 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
7697 /* WaForceL3Serialization:ivb */
7698 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7699 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
7705 I915_WRITE(GEN6_UCGCTL2,
7706 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7708 /* This is required by WaCatErrorRejectionIssue:ivb */
7709 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7710 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7711 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7713 g4x_disable_trickle_feed(dev_priv);
7715 gen7_setup_fixed_func_scheduler(dev_priv);
7717 if (0) { /* causes HiZ corruption on ivb:gt1 */
7718 /* enable HiZ Raw Stall Optimization */
7719 I915_WRITE(CACHE_MODE_0_GEN7,
7720 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7723 /* WaDisable4x2SubspanOptimization:ivb */
7724 I915_WRITE(CACHE_MODE_1,
7725 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
* BSpec recommends 8x4 when MSAA is used,
7729 * however in practice 16x4 seems fastest.
7731 * Note that PS/WM thread counts depend on the WIZ hashing
7732 * disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
7735 I915_WRITE(GEN7_GT_MODE,
7736 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7738 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7739 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7740 snpcr |= GEN6_MBC_SNPCR_MED;
7741 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7743 if (!HAS_PCH_NOP(dev_priv))
7744 cpt_init_clock_gating(dev_priv);
7746 gen6_check_mch_setup(dev_priv);
7749 static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
7751 /* WaDisableEarlyCull:vlv */
7752 I915_WRITE(_3D_CHICKEN3,
7753 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7755 /* WaDisableBackToBackFlipFix:vlv */
7756 I915_WRITE(IVB_CHICKEN3,
7757 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7758 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7760 /* WaPsdDispatchEnable:vlv */
7761 /* WaDisablePSDDualDispatchEnable:vlv */
7762 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7763 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7764 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7766 /* WaDisable_RenderCache_OperationalFlush:vlv */
7767 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7769 /* WaForceL3Serialization:vlv */
7770 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7771 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7773 /* WaDisableDopClockGating:vlv */
7774 I915_WRITE(GEN7_ROW_CHICKEN2,
7775 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7777 /* This is required by WaCatErrorRejectionIssue:vlv */
7778 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7779 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7780 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7782 gen7_setup_fixed_func_scheduler(dev_priv);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*/
7788 I915_WRITE(GEN6_UCGCTL2,
7789 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7791 /* WaDisableL3Bank2xClockGate:vlv
7792 * Disabling L3 clock gating- MMIO 940c[25] = 1
7793 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7794 I915_WRITE(GEN7_UCGCTL4,
7795 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
*/
7801 I915_WRITE(CACHE_MODE_1,
7802 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/*
* BSpec recommends 8x4 when MSAA is used,
7806 * however in practice 16x4 seems fastest.
7808 * Note that PS/WM thread counts depend on the WIZ hashing
7809 * disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
7812 I915_WRITE(GEN7_GT_MODE,
7813 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
/*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
7819 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
/*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
7826 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7829 static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
7831 /* WaVSRefCountFullforceMissDisable:chv */
7832 /* WaDSRefCountFullforceMissDisable:chv */
7833 I915_WRITE(GEN7_FF_THREAD_MODE,
7834 I915_READ(GEN7_FF_THREAD_MODE) &
7835 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7837 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7838 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7839 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7841 /* WaDisableCSUnitClockGating:chv */
7842 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7843 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7845 /* WaDisableSDEUnitClockGating:chv */
7846 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7847 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
* WaProgramL3SqcReg1Default:chv
7851 * See gfxspecs/Related Documents/Performance Guide/
* LSQC Setting Recommendations.
*/
7854 gen8_set_l3sqc_credits(dev_priv, 38, 2);
/*
* GTT cache may not work with big pages, so if those
* are ever enabled GTT cache may need to be disabled.
*/
7860 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7863 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7865 uint32_t dspclk_gate;
7867 I915_WRITE(RENCLK_GATE_D1, 0);
7868 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7869 GS_UNIT_CLOCK_GATE_DISABLE |
7870 CL_UNIT_CLOCK_GATE_DISABLE);
7871 I915_WRITE(RAMCLK_GATE_D, 0);
7872 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7873 OVRUNIT_CLOCK_GATE_DISABLE |
7874 OVCUNIT_CLOCK_GATE_DISABLE;
7875 if (IS_GM45(dev_priv))
7876 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7877 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7879 /* WaDisableRenderCachePipelinedFlush */
7880 I915_WRITE(CACHE_MODE_0,
7881 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7883 /* WaDisable_RenderCache_OperationalFlush:g4x */
7884 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7886 g4x_disable_trickle_feed(dev_priv);
7889 static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
7891 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7892 I915_WRITE(RENCLK_GATE_D2, 0);
7893 I915_WRITE(DSPCLK_GATE_D, 0);
7894 I915_WRITE(RAMCLK_GATE_D, 0);
7895 I915_WRITE16(DEUC, 0);
7896 I915_WRITE(MI_ARB_STATE,
7897 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7899 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7900 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7903 static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
7905 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7906 I965_RCC_CLOCK_GATE_DISABLE |
7907 I965_RCPB_CLOCK_GATE_DISABLE |
7908 I965_ISC_CLOCK_GATE_DISABLE |
7909 I965_FBC_CLOCK_GATE_DISABLE);
7910 I915_WRITE(RENCLK_GATE_D2, 0);
7911 I915_WRITE(MI_ARB_STATE,
7912 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7914 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7915 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7918 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7920 u32 dstate = I915_READ(D_STATE);
7922 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7923 DSTATE_DOT_CLOCK_GATING;
7924 I915_WRITE(D_STATE, dstate);
7926 if (IS_PINEVIEW(dev_priv))
7927 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7929 /* IIR "flip pending" means done if this bit is set */
7930 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7932 /* interrupts should cause a wake up from C3 */
7933 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7935 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7936 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7938 I915_WRITE(MI_ARB_STATE,
7939 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7942 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7944 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7946 /* interrupts should cause a wake up from C3 */
7947 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7948 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7950 I915_WRITE(MEM_MODE,
7951 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7954 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
7956 I915_WRITE(MEM_MODE,
7957 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7958 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7961 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
7963 dev_priv->display.init_clock_gating(dev_priv);
7966 void intel_suspend_hw(struct drm_i915_private *dev_priv)
7968 if (HAS_PCH_LPT(dev_priv))
7969 lpt_suspend_hw(dev_priv);
7972 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
7974 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
* intel_init_clock_gating_hooks - setup the clock gating hooks
7979 * @dev_priv: device private
7981 * Setup the hooks that configure which clocks of a given platform can be
7982 * gated and also apply various GT and display specific workarounds for these
7983 * platforms. Note that some GT specific workarounds are applied separately
* when GPU contexts or batchbuffers start their execution.
*/
7986 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7988 if (IS_SKYLAKE(dev_priv))
7989 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7990 else if (IS_KABYLAKE(dev_priv))
7991 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7992 else if (IS_BROXTON(dev_priv))
7993 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7994 else if (IS_GEMINILAKE(dev_priv))
7995 dev_priv->display.init_clock_gating = glk_init_clock_gating;
7996 else if (IS_BROADWELL(dev_priv))
7997 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7998 else if (IS_CHERRYVIEW(dev_priv))
7999 dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
8000 else if (IS_HASWELL(dev_priv))
8001 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
8002 else if (IS_IVYBRIDGE(dev_priv))
8003 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
8004 else if (IS_VALLEYVIEW(dev_priv))
8005 dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
8006 else if (IS_GEN6(dev_priv))
8007 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
8008 else if (IS_GEN5(dev_priv))
8009 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
8010 else if (IS_G4X(dev_priv))
8011 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
8012 else if (IS_I965GM(dev_priv))
8013 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
8014 else if (IS_I965G(dev_priv))
8015 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
8016 else if (IS_GEN3(dev_priv))
8017 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
8018 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
8019 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
8020 else if (IS_GEN2(dev_priv))
8021 dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
dev_priv->display.init_clock_gating = nop_init_clock_gating;
}
}
8028 /* Set up chip specific power management-related functions */
8029 void intel_init_pm(struct drm_i915_private *dev_priv)
8031 intel_fbc_init(dev_priv);
8034 if (IS_PINEVIEW(dev_priv))
8035 i915_pineview_get_mem_freq(dev_priv);
8036 else if (IS_GEN5(dev_priv))
8037 i915_ironlake_get_mem_freq(dev_priv);
8039 /* For FIFO watermark updates */
8040 if (INTEL_GEN(dev_priv) >= 9) {
8041 skl_setup_wm_latency(dev_priv);
8042 dev_priv->display.initial_watermarks = skl_initial_wm;
8043 dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
8044 dev_priv->display.compute_global_watermarks = skl_compute_wm;
8045 } else if (HAS_PCH_SPLIT(dev_priv)) {
8046 ilk_setup_wm_latency(dev_priv);
8048 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
8049 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
8050 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
8051 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
8052 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
8053 dev_priv->display.compute_intermediate_wm =
8054 ilk_compute_intermediate_wm;
8055 dev_priv->display.initial_watermarks =
8056 ilk_initial_watermarks;
8057 dev_priv->display.optimize_watermarks =
8058 ilk_optimize_watermarks;
8060 DRM_DEBUG_KMS("Failed to read display plane latency. "
8063 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
8064 vlv_setup_wm_latency(dev_priv);
8065 dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
8066 dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
8067 dev_priv->display.initial_watermarks = vlv_initial_watermarks;
8068 dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
8069 dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
8070 } else if (IS_PINEVIEW(dev_priv)) {
8071 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
8074 dev_priv->mem_freq)) {
8075 DRM_INFO("failed to find known CxSR latency "
8076 "(found ddr%s fsb freq %d, mem freq %d), "
8078 (dev_priv->is_ddr3 == 1) ? "3" : "2",
8079 dev_priv->fsb_freq, dev_priv->mem_freq);
8080 /* Disable CxSR and never update its watermark again */
8081 intel_set_memory_cxsr(dev_priv, false);
8082 dev_priv->display.update_wm = NULL;
8084 dev_priv->display.update_wm = pineview_update_wm;
8085 } else if (IS_G4X(dev_priv)) {
8086 dev_priv->display.update_wm = g4x_update_wm;
8087 } else if (IS_GEN4(dev_priv)) {
8088 dev_priv->display.update_wm = i965_update_wm;
8089 } else if (IS_GEN3(dev_priv)) {
8090 dev_priv->display.update_wm = i9xx_update_wm;
8091 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
8092 } else if (IS_GEN2(dev_priv)) {
8093 if (INTEL_INFO(dev_priv)->num_pipes == 1) {
8094 dev_priv->display.update_wm = i845_update_wm;
8095 dev_priv->display.get_fifo_size = i845_get_fifo_size;
8097 dev_priv->display.update_wm = i9xx_update_wm;
8098 dev_priv->display.get_fifo_size = i830_get_fifo_size;
8101 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	u32 flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	u32 flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */
	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */
	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

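/*
 * Usage sketch (editor's illustration, with a made-up mailbox ID): both
 * helpers must be called with rps.hw_lock held, and a read passes the
 * request payload in through *val and returns the reply the same way:
 *
 *	u32 val = 0;
 *	int err;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	err = sandybridge_pcode_read(dev_priv, EXAMPLE_PCODE_MBOX, &val);
 *	if (!err)
 *		err = sandybridge_pcode_write(dev_priv, EXAMPLE_PCODE_MBOX,
 *					      val | BIT(0));
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * EXAMPLE_PCODE_MBOX is a placeholder; real callers use the GEN6_PCODE_*
 * mailbox IDs defined in i915_reg.h.
 */
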
static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, is then retried for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}

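/*
 * Usage sketch (editor's illustration): the cdclk code uses this helper in
 * roughly this shape to wait for PCODE to become ready for a frequency
 * change, with a 3 ms base timeout:
 *
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *	if (ret)
 *		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret);
 *
 * (See intel_cdclk.c for the exact caller.)
 */
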
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}

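/*
 * Worked example (editor's illustration, with an assumed reference clock):
 * gpll_ref_freq is kept in kHz, so with a hypothetical 20000 kHz (20 MHz)
 * GPLL reference on VLV, opcode 0xc5 gives
 *
 *	byt_gpu_freq(0xc5) = 20000 * (0xc5 - 0xb7) / 1000
 *			   = 20000 * 14 / 1000 = 280 MHz
 *
 * and byt_freq_opcode(280) inverts this back to 0xc5. The CHV variant is
 * the same except the opcode counts at twice the CU rate, hence the extra
 * factor of two in each direction and the "even values" rounding.
 */
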
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

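/*
 * Worked example (editor's illustration): with GT_FREQUENCY_MULTIPLIER = 50
 * and GEN9_FREQ_SCALER = 3 (their values at the time of writing), a gen9
 * RPS opcode counts in 50/3 MHz steps, so
 *
 *	intel_gpu_freq(dev_priv, 18) = 18 * 50 / 3 = 300 MHz
 *	intel_freq_opcode(dev_priv, 300) = 300 * 3 / 50 = 18
 *
 * while pre-gen9 big-core parts simply count in 50 MHz steps.
 */
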
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}

void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/* The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);

	/* vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * for now.
	 */

	spin_unlock_irq(&dev_priv->uncore.lock);

	return lower | (u64)upper << 8;
}

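/*
 * Worked example (editor's illustration): the high-range window exposes
 * bits 39:8 of the 40-bit counter and the low-range window bits 31:0, so
 * for a counter value of 0xAABBCCDDEE the two reads return
 *
 *	upper = 0xAABBCCDD	(bits 39:8)
 *	lower = 0xBBCCDDEE	(bits 31:0)
 *
 * and lower | (u64)upper << 8 reassembles 0xAABBCCDDEE. The retry loop
 * above guards against bits 39:8 changing between the two reads.
 */
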
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, units, div;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1000;
		div = dev_priv->czclk_freq;

		time_hw = vlv_residency_raw(dev_priv, reg);
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1000;
		div = 1200;		/* 833.33ns */

		time_hw = I915_READ(reg);
	} else {
		units = 128000; /* 1.28us */
		div = 100000;

		time_hw = I915_READ(reg);
	}

	intel_runtime_pm_put(dev_priv);
	return DIV_ROUND_UP_ULL(time_hw * units, div);
}
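
/*
 * Worked example (editor's illustration): on big-core parts one counter
 * tick is 1.28us, encoded above as units/div = 128000/100000, so a raw
 * reading of time_hw = 1000000 ticks converts to
 *
 *	DIV_ROUND_UP_ULL(1000000 * 128000, 100000) = 1280000 us
 *
 * i.e. 1.28 seconds of RC6 residency.
 */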