/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
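/*
 * Example (purely illustrative): a platform that supports RC6 and deep RC6
 * but not RC6pp would be described by a mask such as
 *
 *	int rc6_mode = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * and an individual state can then be tested with, e.g.,
 * (rc6_mode & INTEL_RC6p_ENABLE).
 */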
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 int is_ddr3,
							 int fsb, int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and yet it does
		 * have the related watermark in FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	dev_priv->wm.vlv.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
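/*
 * Worked example (illustrative numbers only): with a 100 MHz pixel clock
 * and 4 bytes per pixel, 5us of latency corresponds to roughly
 *
 *	(100000 kHz / 1000) * 4 B * 5000 ns / 1000 = 2000 bytes
 *
 * i.e. about 32 64-byte cachelines that must still be queued in the FIFO
 * when the fetch request is issued, which is what intel_calculate_wm()
 * below computes as "entries_required".
 */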
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
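/*
 * Example decode (illustrative values): with dsparb = 0x0000c040,
 * dsparb2 = 0x00000010, lo_shift = 8 and hi_shift = 4 the macro yields
 *
 *	((0x0000c040 >> 8) & 0xff) | (((0x00000010 >> 4) & 0x1) << 8)
 *	= 0xc0 | 0x100 = 0x1c0 = 448
 *
 * i.e. the 9-bit FIFO start position is assembled from an 8-bit low field
 * and a single high bit.
 */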
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;

	DRM_DEBUG_KMS("Pipe %c FIFO size: %d/%d/%d/%d\n",
		      pipe_name(pipe),
		      fifo_state->plane[PLANE_PRIMARY],
		      fifo_state->plane[PLANE_SPRITE0],
		      fifo_state->plane[PLANE_SPRITE1],
		      fifo_state->plane[PLANE_CURSOR]);
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
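/*
 * Worked example of the formula above (illustrative numbers only):
 * clock_in_khz = 108000 (108 MHz), cpp = 4, latency_ns = 5000 and
 * 64-byte cachelines give
 *
 *	entries_required = (108000 / 1000) * 4 * 5000 / 1000 = 2160 bytes
 *	                 -> DIV_ROUND_UP(2160, 64) = 34 cachelines
 *
 * so with a hypothetical 96-entry FIFO and a guard size of 2 the
 * watermark would be 96 - (34 + 2) = 60 entries.
 */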
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
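/*
 * Worked example of the "small buffer" method above (illustrative numbers
 * only): clock = 148500 kHz, cpp = 4 and display_latency_ns = 5000 give
 *
 *	entries = (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes
 *	        -> DIV_ROUND_UP(2970, 64) = 47 FIFO entries
 *
 * before the guard size is added. The "large buffer" method instead works
 * in whole scanlines: line_count lines of width * cpp bytes each.
 */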
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = fb->format->cpp[0];

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
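/*
 * Worked example for the method above (illustrative numbers only):
 * pixel_rate = 148500 kHz, pipe_htotal = 2200, horiz_pixels = 1920,
 * cpp = 4 and latency = 30 (3us in 0.1us units) give
 *
 *	lines  = (30 * 148500) / (2200 * 10000) = 0  -> 0 + 1 = 1 line
 *	bytes  = 1 * 1920 * 4 = 7680
 *	blocks = DIV_ROUND_UP(7680, 64) = 120
 *
 * i.e. the watermark is the number of 64-byte blocks needed to cover the
 * scanlines that can pass while the memory request is outstanding.
 */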
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!plane_state->base.visible)
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	struct drm_device *dev = crtc->base.dev;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->id == PLANE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += state->base.fb->format->cpp[0];
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->id == PLANE_CURSOR) {
			fifo_state->plane[plane->id] = 63;
			continue;
		}

		if (!state->base.visible) {
			fifo_state->plane[plane->id] = 0;
			continue;
		}

		rate = state->base.fb->format->cpp[0];
		fifo_state->plane[plane->id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane->id];
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->id == PLANE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (fifo_state->plane[plane->id] == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane->id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
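/*
 * Worked example of the split above (illustrative only): with the primary
 * plane at 4 bytes per pixel and one sprite at 2 bytes per pixel visible,
 * total_rate = 6, so out of the 511 usable entries the primary gets
 * 511 * 4 / 6 = 340 and the sprite 511 * 2 / 6 = 170. The single
 * remaining entry (511 - 340 - 170 = 1) is then handed out by the
 * "spread the remainder evenly" loop.
 */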
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
static void vlv_invert_wms(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
		const int sr_fifo_size =
			INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
		enum plane_id plane_id;

		wm_state->sr[level].plane =
			vlv_invert_wm_value(wm_state->sr[level].plane,
					    sr_fifo_size);
		wm_state->sr[level].cursor =
			vlv_invert_wm_value(wm_state->sr[level].cursor,
					    63);

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(wm_state->wm[level].plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}
}
static void vlv_compute_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	struct intel_plane *plane;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = dev_priv->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc_state);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(crtc_state, state, level);
			int max_wm = fifo_state->plane[plane->id];

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > max_wm)
				break;

			wm_state->wm[level].plane[plane->id] = wm;
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		if (plane->id == PLANE_CURSOR) {
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].plane[PLANE_CURSOR];
		} else {
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					max(wm_state->sr[level].plane,
					    wm_state->wm[level].plane[plane->id]);
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc_state);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	spin_lock(&dev_priv->wm.dsparb_lock);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	POSTING_READ(DSPARB);

	spin_unlock(&dev_priv->wm.dsparb_lock);
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}
static void vlv_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum pipe pipe = crtc->pipe;
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_compute_wm(crtc_state);
	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(crtc_state);
		return;
	}

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(crtc_state);

	vlv_write_wm_values(dev_priv, &new_wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR],
		      new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1],
		      new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
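/*
 * Worked example (illustrative numbers only): pixel_rate = 148500 kHz,
 * cpp = 4, latency = 50 (5us in 0.1us units), pipe_htotal = 2200,
 * horiz_pixels = 1920.
 *
 * Method 1 (bandwidth based):
 *	DIV_ROUND_UP(148500 * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49
 *
 * Method 2 (scanline based):
 *	lines = (50 * 148500) / (2200 * 10000) = 0  -> 0 + 1 = 1 line
 *	DIV_ROUND_UP(1 * 1920 * 4, 64) + 2 = 120 + 2 = 122
 *
 * ilk_compute_pri_wm() below then takes the minimum of the two for the
 * LP levels, here 49.
 */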
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (!cstate->base.active || !pstate->base.fb)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_method2(cstate->pixel_rate,
			      cstate->base.adjusted_mode.crtc_htotal,
			      pstate->base.crtc_w, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}
static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
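/*
 * Worked example of the 5:6 split above (illustrative only): with a
 * 768-entry FIFO, one active pipe and sprites enabled, an LP1+ level
 * using INTEL_DDB_PART_5_6 gives the sprite 768 * 5 / 6 = 640 entries
 * and the primary 768 / 6 = 128, whereas the default 1:1 split gives
 * each of them 768 / 2 = 384.
 */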
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
2064 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2066 const struct intel_atomic_state *intel_state =
2067 to_intel_atomic_state(cstate->base.state);
2068 const struct drm_display_mode *adjusted_mode =
2069 &cstate->base.adjusted_mode;
2070 u32 linetime, ips_linetime;
2072 if (!cstate->base.active)
		return 0;
2074 if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
2076 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;
2079 /* The watermarks are computed based on how long it takes to fill a
2080 * single row at the given clock rate, multiplied by 8.
	 */
2082 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2083 adjusted_mode->crtc_clock);
2084 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2085 intel_state->cdclk.logical.cdclk);
2087 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2088 PIPE_WM_LINETIME_TIME(linetime);
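/*
 * Worked example (hypothetical mode timings, for illustration only): with
 * crtc_htotal = 2200 and crtc_clock = 148500 kHz, one row takes
 * 2200 / 148500 kHz ~= 14.81 us, so
 *
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * in 1/8 us units (119 / 8 = 14.875 us). ips_linetime is the same
 * computation performed against the logical cdclk instead of the pixel
 * clock.
 */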
2091 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
2094 if (IS_GEN9(dev_priv)) {
		uint32_t val;
		int ret, i;
2097 int level, max_level = ilk_wm_max_level(dev_priv);
2099 /* read the first set of memory latencies[0:3] */
2100 val = 0; /* data0 to be programmed to 0 for first set */
2101 mutex_lock(&dev_priv->rps.hw_lock);
2102 ret = sandybridge_pcode_read(dev_priv,
2103 GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
2105 mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
2108 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}
2112 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2113 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2114 GEN9_MEM_LATENCY_LEVEL_MASK;
2115 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2116 GEN9_MEM_LATENCY_LEVEL_MASK;
2117 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2118 GEN9_MEM_LATENCY_LEVEL_MASK;
2120 /* read the second set of memory latencies[4:7] */
2121 val = 1; /* data0 to be programmed to 1 for second set */
2122 mutex_lock(&dev_priv->rps.hw_lock);
2123 ret = sandybridge_pcode_read(dev_priv,
2124 GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
2126 mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
2128 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}
2132 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2133 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2134 GEN9_MEM_LATENCY_LEVEL_MASK;
2135 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2136 GEN9_MEM_LATENCY_LEVEL_MASK;
2137 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2138 GEN9_MEM_LATENCY_LEVEL_MASK;
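/*
 * Each GEN9_PCODE_READ_MEM_LATENCY reply packs four 8-bit latency values
 * (in us) into one dword, extracted with the 0xff level mask at shifts of
 * 0/8/16/24. For illustration only (made-up reply, real values come from
 * the punit): val = 0x20140a04 would decode to wm[4] = 4, wm[5] = 10,
 * wm[6] = 20 and wm[7] = 32 us.
 */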
2141 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
2142 * need to be disabled. We make sure to sanitize the values out
2143 * of the punit to satisfy this requirement.
2145 for (level = 1; level <= max_level; level++) {
2146 if (wm[level] == 0) {
2147 for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}
2154 * WaWmMemoryReadLatency:skl,glk
2156 * punit doesn't take into account the read latency so we need
2157 * to add 2us to the various latency levels we retrieve from the
2158 * punit when level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
2162 for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}
2169 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2170 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2172 wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
2174 wm[0] = sskpd & 0xF;
2175 wm[1] = (sskpd >> 4) & 0xFF;
2176 wm[2] = (sskpd >> 12) & 0xFF;
2177 wm[3] = (sskpd >> 20) & 0x1FF;
2178 wm[4] = (sskpd >> 32) & 0x1FF;
2179 } else if (INTEL_GEN(dev_priv) >= 6) {
2180 uint32_t sskpd = I915_READ(MCH_SSKPD);
2182 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2183 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2184 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2185 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2186 } else if (INTEL_GEN(dev_priv) >= 5) {
2187 uint32_t mltr = I915_READ(MLTR_ILK);
2189 /* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
2191 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2192 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2196 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
2199 /* ILK sprite LP0 latency is 1300 ns */
2200 if (IS_GEN5(dev_priv))
		wm[0] = 13;
2204 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
2207 /* ILK cursor LP0 latency is 1300 ns */
2208 if (IS_GEN5(dev_priv))
		wm[0] = 13;
2211 /* WaDoubleCursorLP3Latency:ivb */
2212 if (IS_IVYBRIDGE(dev_priv))
		wm[3] *= 2;
2216 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
2218 /* how many WM levels are we expecting */
2219 if (INTEL_GEN(dev_priv) >= 9)
		return 7;
2221 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
2223 else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
2229 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
2231 const uint16_t wm[8])
2233 int level, max_level = ilk_wm_max_level(dev_priv);
2235 for (level = 0; level <= max_level; level++) {
2236 unsigned int latency = wm[level];
		if (latency == 0) {
2239 DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}
2245 * - latencies are in us on gen9.
2246 * - before then, WM1+ latency values are in 0.5us units
2248 if (IS_GEN9(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;
2253 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2254 name, level, wm[level],
2255 latency / 10, latency % 10);
2259 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2260 uint16_t wm[5], uint16_t min)
2262 int level, max_level = ilk_wm_max_level(dev_priv);
	if (wm[0] >= min)
		return false;

2267 wm[0] = max(wm[0], min);
2268 for (level = 1; level <= max_level; level++)
2269 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
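/*
 * Note the unit mismatch this helper hides: wm[0] is stored in 0.1 us
 * units while WM1+ values are stored in 0.5 us units. E.g. min = 12
 * (1.2 us) raises the WM0 floor to 12 and the WM1+ floor to
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.
 */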
2274 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2279 * The BIOS provided WM memory latency values are often
2280 * inadequate for high resolution displays. Adjust them.
2282 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2283 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2284 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2289 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2290 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2291 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2292 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2295 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2297 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
2299 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2300 sizeof(dev_priv->wm.pri_latency));
2301 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2302 sizeof(dev_priv->wm.pri_latency));
2304 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
2305 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
2307 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
2308 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
2309 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
2311 if (IS_GEN6(dev_priv))
2312 snb_wm_latency_quirk(dev_priv);
2315 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
2317 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
2318 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
2321 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2322 struct intel_pipe_wm *pipe_wm)
2324 /* LP0 watermark maximums depend on this pipe alone */
2325 const struct intel_wm_config config = {
2326 .num_pipes_active = 1,
2327 .sprites_enabled = pipe_wm->sprites_enabled,
2328 .sprites_scaled = pipe_wm->sprites_scaled,
	};
2330 struct ilk_wm_maximums max;
2332 /* LP0 watermarks always use 1/2 DDB partitioning */
2333 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2335 /* At least LP0 must be valid */
2336 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2337 DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
2344 /* Compute new watermarks for the pipe */
2345 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2347 struct drm_atomic_state *state = cstate->base.state;
2348 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2349 struct intel_pipe_wm *pipe_wm;
2350 struct drm_device *dev = state->dev;
2351 const struct drm_i915_private *dev_priv = to_i915(dev);
2352 struct intel_plane *intel_plane;
2353 struct intel_plane_state *pristate = NULL;
2354 struct intel_plane_state *sprstate = NULL;
2355 struct intel_plane_state *curstate = NULL;
2356 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
2357 struct ilk_wm_maximums max;
2359 pipe_wm = &cstate->wm.ilk.optimal;
2361 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2362 struct intel_plane_state *ps;
2364 ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (WARN_ON(!ps))
			continue;
2369 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
2371 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
2373 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}
2377 pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
2379 pipe_wm->sprites_enabled = sprstate->base.visible;
2380 pipe_wm->sprites_scaled = sprstate->base.visible &&
2381 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
2382 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}
2385 usable_level = max_level;
2387 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2388 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;
2391 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2392 if (pipe_wm->sprites_scaled)
		usable_level = 0;
2395 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2396 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2398 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2399 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2401 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2402 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2404 if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;
2407 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
2409 for (level = 1; level <= max_level; level++) {
2410 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2412 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2413 pristate, sprstate, curstate, wm);
2416 * Disable any watermark level that exceeds the
2417 * register maximums since such watermarks are
		 * always invalid.
		 */
2420 if (level > usable_level)
			continue;
2423 if (ilk_validate_wm_level(level, &max, wm))
2424 pipe_wm->wm[level] = *wm;
		else
2426 usable_level = level;
	}

	return 0;
2433 * Build a set of 'intermediate' watermark values that satisfy both the old
2434 * state and the new state. These can be programmed to the hardware
 * immediately.
2437 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2438 struct intel_crtc *intel_crtc,
2439 struct intel_crtc_state *newstate)
2441 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2442 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2443 int level, max_level = ilk_wm_max_level(to_i915(dev));
2446 * Start with the final, target watermarks, then combine with the
2447 * currently active watermarks to get values that are safe both before
2448 * and after the vblank.
2450 *a = newstate->wm.ilk.optimal;
2451 a->pipe_enabled |= b->pipe_enabled;
2452 a->sprites_enabled |= b->sprites_enabled;
2453 a->sprites_scaled |= b->sprites_scaled;
2455 for (level = 0; level <= max_level; level++) {
2456 struct intel_wm_level *a_wm = &a->wm[level];
2457 const struct intel_wm_level *b_wm = &b->wm[level];
2459 a_wm->enable &= b_wm->enable;
2460 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2461 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2462 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2463 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2467 * We need to make sure that these merged watermark values are
2468 * actually a valid configuration themselves. If they're not,
2469 * there's no safe way to transition from the old state to
2470 * the new state, so we need to fail the atomic transaction.
2472 if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;
2476 * If our intermediate WM are identical to the final WM, then we can
2477 * omit the post-vblank programming; only update if it's different.
2479 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2480 newstate->wm.need_postvbl_update = false;

	return 0;
2486 * Merge the watermarks from all active pipes for a specific level.
2488 static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
2490 struct intel_wm_level *ret_wm)
2492 const struct intel_crtc *intel_crtc;
2494 ret_wm->enable = true;
2496 for_each_intel_crtc(dev, intel_crtc) {
2497 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2498 const struct intel_wm_level *wm = &active->wm[level];
2500 if (!active->pipe_enabled)
			continue;
2504 * The watermark values may have been used in the past,
2505 * so we must maintain them in the registers for some
2506 * time even if the level is now disabled.
		if (!wm->enable)
2509 ret_wm->enable = false;
2511 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2512 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2513 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2514 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2519 * Merge all low power watermarks for all active pipes.
2521 static void ilk_wm_merge(struct drm_device *dev,
2522 const struct intel_wm_config *config,
2523 const struct ilk_wm_maximums *max,
2524 struct intel_pipe_wm *merged)
2526 struct drm_i915_private *dev_priv = to_i915(dev);
2527 int level, max_level = ilk_wm_max_level(dev_priv);
2528 int last_enabled_level = max_level;
2530 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2531 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
2532 config->num_pipes_active > 1)
2533 last_enabled_level = 0;
2535 /* ILK: FBC WM must be disabled always */
2536 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
2538 /* merge each WM1+ level */
2539 for (level = 1; level <= max_level; level++) {
2540 struct intel_wm_level *wm = &merged->wm[level];
2542 ilk_merge_wm_level(dev, level, wm);
2544 if (level > last_enabled_level)
			wm->enable = false;
2546 else if (!ilk_validate_wm_level(level, max, wm))
2547 /* make sure all following levels get disabled */
2548 last_enabled_level = level - 1;
2551 * The spec says it is preferred to disable
2552 * FBC WMs instead of disabling a WM level.
2554 if (wm->fbc_val > max->fbc) {
			if (wm->enable)
2556 merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
2561 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2563 * FIXME this is racy. FBC might get enabled later.
2564 * What we should check here is whether FBC can be
2565 * enabled sometime later.
2567 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
2568 intel_fbc_is_active(dev_priv)) {
2569 for (level = 2; level <= max_level; level++) {
2570 struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
2577 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2579 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2580 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
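/*
 * Example of the mapping above: LP1 always uses level 1; when
 * pipe_wm->wm[4].enable is set (a 5-level HSW/BDW configuration)
 * LP2/LP3 map to levels 3/4, otherwise to levels 2/3.
 */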
2583 /* The value we need to program into the WM_LPx latency field */
2584 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2586 struct drm_i915_private *dev_priv = to_i915(dev);
2588 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
2591 return dev_priv->wm.pri_latency[level];
2594 static void ilk_compute_wm_results(struct drm_device *dev,
2595 const struct intel_pipe_wm *merged,
2596 enum intel_ddb_partitioning partitioning,
2597 struct ilk_wm_values *results)
2599 struct drm_i915_private *dev_priv = to_i915(dev);
2600 struct intel_crtc *intel_crtc;
2603 results->enable_fbc_wm = merged->fbc_wm_enabled;
2604 results->partitioning = partitioning;
2606 /* LP1+ register values */
2607 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2608 const struct intel_wm_level *r;
2610 level = ilk_wm_lp_to_level(wm_lp, merged);
2612 r = &merged->wm[level];
2615 * Maintain the watermark values even if the level is
2616 * disabled. Doing otherwise could cause underruns.
2618 results->wm_lp[wm_lp - 1] =
2619 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2620 (r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
2624 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2626 if (INTEL_GEN(dev_priv) >= 8)
2627 results->wm_lp[wm_lp - 1] |=
2628 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
2630 results->wm_lp[wm_lp - 1] |=
2631 r->fbc_val << WM1_LP_FBC_SHIFT;
2634 * Always set WM1S_LP_EN when spr_val != 0, even if the
2635 * level is disabled. Doing otherwise could cause underruns.
2637 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
2638 WARN_ON(wm_lp != 1);
2639 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else {
2641 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
		}
2644 /* LP0 register values */
2645 for_each_intel_crtc(dev, intel_crtc) {
2646 enum pipe pipe = intel_crtc->pipe;
2647 const struct intel_wm_level *r =
2648 &intel_crtc->wm.active.ilk.wm[0];
2650 if (WARN_ON(!r->enable))
			continue;
2653 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2655 results->wm_pipe[pipe] =
2656 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2657 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
2662 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2663 * case both are at the same level. Prefer r1 in case they're the same. */
2664 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2665 struct intel_pipe_wm *r1,
2666 struct intel_pipe_wm *r2)
2668 int level, max_level = ilk_wm_max_level(to_i915(dev));
2669 int level1 = 0, level2 = 0;
2671 for (level = 1; level <= max_level; level++) {
2672 if (r1->wm[level].enable)
			level1 = level;
2674 if (r2->wm[level].enable)
			level2 = level;
2678 if (level1 == level2) {
2679 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
2683 } else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
2690 /* dirty bits used to track which watermarks need changes */
2691 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2692 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2693 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2694 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2695 #define WM_DIRTY_FBC (1 << 24)
2696 #define WM_DIRTY_DDB (1 << 25)
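/*
 * Resulting layout of the dirty mask, for reference: bits 0-2 hold the
 * per-pipe WM0 flags, bits 8-10 the per-pipe linetime flags, bits 16-18
 * WM_LP1-WM_LP3, bit 24 the FBC watermark flag and bit 25 the DDB
 * partitioning flag.
 */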
2698 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2699 const struct ilk_wm_values *old,
2700 const struct ilk_wm_values *new)
2702 unsigned int dirty = 0;
2706 for_each_pipe(dev_priv, pipe) {
2707 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2708 dirty |= WM_DIRTY_LINETIME(pipe);
2709 /* Must disable LP1+ watermarks too */
2710 dirty |= WM_DIRTY_LP_ALL;
2713 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2714 dirty |= WM_DIRTY_PIPE(pipe);
2715 /* Must disable LP1+ watermarks too */
2716 dirty |= WM_DIRTY_LP_ALL;
2720 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2721 dirty |= WM_DIRTY_FBC;
2722 /* Must disable LP1+ watermarks too */
2723 dirty |= WM_DIRTY_LP_ALL;
2726 if (old->partitioning != new->partitioning) {
2727 dirty |= WM_DIRTY_DDB;
2728 /* Must disable LP1+ watermarks too */
2729 dirty |= WM_DIRTY_LP_ALL;
2732 /* LP1+ watermarks already deemed dirty, no need to continue */
2733 if (dirty & WM_DIRTY_LP_ALL)
		return dirty;
2736 /* Find the lowest numbered LP1+ watermark in need of an update... */
2737 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2738 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2739 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
2743 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2744 for (; wm_lp <= 3; wm_lp++)
2745 dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
2750 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2753 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2754 bool changed = false;
2756 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2757 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2758 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
2761 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2762 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2763 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
2766 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2767 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2768 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}
2773 * Don't touch WM1S_LP_EN here.
2774 * Doing so could cause underruns.
	 */

	return changed;
2781 * The spec says we shouldn't write when we don't need, because every write
2782 * causes WMs to be re-evaluated, expending some power.
2784 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2785 struct ilk_wm_values *results)
2787 struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

2791 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;
2795 _ilk_disable_lp_wm(dev_priv, dirty);
2797 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2798 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2799 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2800 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2801 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2802 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2804 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2805 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2806 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2807 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2808 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2809 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2811 if (dirty & WM_DIRTY_DDB) {
2812 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2813 val = I915_READ(WM_MISC);
2814 if (results->partitioning == INTEL_DDB_PART_1_2)
2815 val &= ~WM_MISC_DATA_PARTITION_5_6;
2817 val |= WM_MISC_DATA_PARTITION_5_6;
2818 I915_WRITE(WM_MISC, val);
2820 val = I915_READ(DISP_ARB_CTL2);
2821 if (results->partitioning == INTEL_DDB_PART_1_2)
2822 val &= ~DISP_DATA_PARTITION_5_6;
2824 val |= DISP_DATA_PARTITION_5_6;
2825 I915_WRITE(DISP_ARB_CTL2, val);
2829 if (dirty & WM_DIRTY_FBC) {
2830 val = I915_READ(DISP_ARB_CTL);
2831 if (results->enable_fbc_wm)
2832 val &= ~DISP_FBC_WM_DIS;
2834 val |= DISP_FBC_WM_DIS;
2835 I915_WRITE(DISP_ARB_CTL, val);
2838 if (dirty & WM_DIRTY_LP(1) &&
2839 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2840 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2842 if (INTEL_GEN(dev_priv) >= 7) {
2843 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2844 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2845 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2846 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2849 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2850 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2851 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2852 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2853 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2854 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2856 dev_priv->wm.hw = *results;
2859 bool ilk_disable_lp_wm(struct drm_device *dev)
2861 struct drm_i915_private *dev_priv = to_i915(dev);
2863 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2866 #define SKL_SAGV_BLOCK_TIME 30 /* µs */
2869 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
2870 * so assume we'll always need it in order to avoid underruns.
2872 static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
2874 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2876 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
		return true;

	return false;
2883 intel_has_sagv(struct drm_i915_private *dev_priv)
2885 if (IS_KABYLAKE(dev_priv))
		return true;

2888 if (IS_SKYLAKE(dev_priv) &&
2889 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
2896 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2897 * depending on power and performance requirements. The display engine access
2898 * to system memory is blocked during the adjustment time. Because of the
2899 * blocking time, having this enabled can cause full system hangs and/or pipe
2900 * underruns if we don't meet all of the following requirements:
2902 * - <= 1 pipe enabled
2903 * - All planes can enable watermarks for latencies >= SAGV engine block time
2904 * - We're not using an interlaced display configuration
2907 intel_enable_sagv(struct drm_i915_private *dev_priv)
2911 if (!intel_has_sagv(dev_priv))
		return 0;

2914 if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;
2917 DRM_DEBUG_KMS("Enabling the SAGV\n");
2918 mutex_lock(&dev_priv->rps.hw_lock);
2920 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);
2923 /* We don't need to wait for the SAGV when enabling */
2924 mutex_unlock(&dev_priv->rps.hw_lock);
2927 * Some skl systems, pre-release machines in particular,
2928 * don't actually have an SAGV.
2930 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
2931 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2932 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2934 } else if (ret < 0) {
2935 DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

2939 dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
2944 intel_disable_sagv(struct drm_i915_private *dev_priv)
2948 if (!intel_has_sagv(dev_priv))
		return 0;

2951 if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;
2954 DRM_DEBUG_KMS("Disabling the SAGV\n");
2955 mutex_lock(&dev_priv->rps.hw_lock);
2957 /* bspec says to keep retrying for at least 1 ms */
2958 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
2960 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
2962 mutex_unlock(&dev_priv->rps.hw_lock);
2965 * Some skl systems, pre-release machines in particular,
2966 * don't actually have an SAGV.
2968 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
2969 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2970 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2972 } else if (ret < 0) {
2973 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
		return ret;
	}

2977 dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
2981 bool intel_can_enable_sagv(struct drm_atomic_state *state)
2983 struct drm_device *dev = state->dev;
2984 struct drm_i915_private *dev_priv = to_i915(dev);
2985 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2986 struct intel_crtc *crtc;
2987 struct intel_plane *plane;
2988 struct intel_crtc_state *cstate;
2992 if (!intel_has_sagv(dev_priv))
2996 * SKL workaround: bspec recommends we disable the SAGV when we have
2997 * more than one pipe enabled
2999 * If there are no active CRTCs, no additional checks need be performed
3001 if (hweight32(intel_state->active_crtcs) == 0)
		return true;
3003 else if (hweight32(intel_state->active_crtcs) > 1)
		return false;
3006 /* Since we're now guaranteed to only have one active CRTC... */
3007 pipe = ffs(intel_state->active_crtcs) - 1;
3008 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3009 cstate = to_intel_crtc_state(crtc->base.state);
3011 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;
3014 for_each_intel_plane_on_crtc(dev, crtc, plane) {
3015 struct skl_plane_wm *wm =
3016 &cstate->wm.skl.optimal.planes[plane->id];
3018 /* Skip this plane if it's not enabled */
3019 if (!wm->wm[0].plane_en)
			continue;
3022 /* Find the highest enabled wm level for this plane */
3023 for (level = ilk_wm_max_level(dev_priv);
3024 !wm->wm[level].plane_en; --level)
		     { }
3027 latency = dev_priv->wm.skl_latency[level];
3029 if (skl_needs_memory_bw_wa(intel_state) &&
3030 plane->base.state->fb->modifier ==
3031 I915_FORMAT_MOD_X_TILED)
			latency += 15;
3035 * If any of the planes on this pipe don't enable wm levels
3036 * that incur memory latencies higher than 30µs we can't enable
		 * the SAGV.
		 */
3039 if (latency < SKL_SAGV_BLOCK_TIME)
			return false;
	}

	return true;
3047 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3048 const struct intel_crtc_state *cstate,
3049 struct skl_ddb_entry *alloc, /* out */
3050 int *num_active /* out */)
3052 struct drm_atomic_state *state = cstate->base.state;
3053 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3054 struct drm_i915_private *dev_priv = to_i915(dev);
3055 struct drm_crtc *for_crtc = cstate->base.crtc;
3056 unsigned int pipe_size, ddb_size;
3057 int nth_active_pipe;
3059 if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
3062 *num_active = hweight32(dev_priv->active_crtcs);
		return;
	}
3066 if (intel_state->active_pipe_changes)
3067 *num_active = hweight32(intel_state->active_crtcs);
	else
3069 *num_active = hweight32(dev_priv->active_crtcs);
3071 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
3072 WARN_ON(ddb_size == 0);
3074 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3077 * If the state doesn't change the active CRTC's, then there's
3078 * no need to recalculate; the existing pipe allocation limits
3079 * should remain unchanged. Note that we're safe from racing
3080 * commits since any racing commit that changes the active CRTC
3081 * list would need to grab _all_ crtc locks, including the one
3082 * we currently hold.
3084 if (!intel_state->active_pipe_changes) {
3086 * alloc may be cleared by clear_intel_crtc_state,
3087 * copy from old state to be sure
3089 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}
3093 nth_active_pipe = hweight32(intel_state->active_crtcs &
3094 (drm_crtc_mask(for_crtc) - 1));
3095 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
3096 alloc->start = nth_active_pipe * ddb_size / *num_active;
3097 alloc->end = alloc->start + pipe_size;
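/*
 * Worked example (assuming a SKL-like ddb_size of 896 blocks): after
 * reserving 4 blocks for the bypass path, ddb_size = 892. With two
 * active pipes each gets pipe_size = 892 / 2 = 446 blocks, and the
 * second active pipe (nth_active_pipe = 1) is assigned [446, 892).
 */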
3100 static unsigned int skl_cursor_allocation(int num_active)
3102 if (num_active == 1)
		return 32;

	return 8;
3108 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3110 entry->start = reg & 0x3ff;
3111 entry->end = (reg >> 16) & 0x3ff;

	/* the register stores an inclusive end; convert back to exclusive */
	if (entry->end)
		entry->end += 1;
3116 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3117 struct skl_ddb_allocation *ddb /* out */)
3119 struct intel_crtc *crtc;
3121 memset(ddb, 0, sizeof(*ddb));
3123 for_each_intel_crtc(&dev_priv->drm, crtc) {
3124 enum intel_display_power_domain power_domain;
3125 enum plane_id plane_id;
3126 enum pipe pipe = crtc->pipe;
3128 power_domain = POWER_DOMAIN_PIPE(pipe);
3129 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
3132 for_each_plane_id_on_crtc(crtc, plane_id) {
3135 if (plane_id != PLANE_CURSOR)
3136 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
			else
3138 val = I915_READ(CUR_BUF_CFG(pipe));
3140 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
3143 intel_display_power_put(dev_priv, power_domain);
3148 * Determines the downscale amount of a plane for the purposes of watermark calculations.
3149 * The bspec defines downscale amount as:
3152 * Horizontal down scale amount = maximum[1, Horizontal source size /
3153 * Horizontal destination size]
3154 * Vertical down scale amount = maximum[1, Vertical source size /
3155 * Vertical destination size]
3156 * Total down scale amount = Horizontal down scale amount *
3157 * Vertical down scale amount
3160 * Return value is provided in 16.16 fixed point form to retain fractional part.
3161 * Caller should take care of dividing & rounding off the value.
3164 skl_plane_downscale_amount(const struct intel_plane_state *pstate)
3166 uint32_t downscale_h, downscale_w;
3167 uint32_t src_w, src_h, dst_w, dst_h;
3169 if (WARN_ON(!pstate->base.visible))
3170 return DRM_PLANE_HELPER_NO_SCALING;
3172 /* n.b., src is 16.16 fixed point, dst is whole integer */
3173 src_w = drm_rect_width(&pstate->base.src);
3174 src_h = drm_rect_height(&pstate->base.src);
3175 dst_w = drm_rect_width(&pstate->base.dst);
3176 dst_h = drm_rect_height(&pstate->base.dst);
3177 if (drm_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);
3180 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3181 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3183 /* Provide result in 16.16 fixed point */
3184 return (uint64_t)downscale_w * downscale_h >> 16;
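/*
 * Example in 16.16 fixed point: scaling a 3840x2160 source (src is
 * 3840<<16 x 2160<<16) onto a 1920x1080 destination gives
 * downscale_w = downscale_h = 0x20000 (2.0), so the function returns
 * (0x20000 * 0x20000) >> 16 = 0x40000, i.e. a total downscale of 4.0.
 */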
3188 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3189 const struct drm_plane_state *pstate,
3192 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3193 uint32_t down_scale_amount, data_rate;
3194 uint32_t width = 0, height = 0;
3195 struct drm_framebuffer *fb;
	u32 format;

3198 if (!intel_pstate->base.visible)
		return 0;

	fb = pstate->fb;
3202 format = fb->format->format;
3204 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
3206 if (y && format != DRM_FORMAT_NV12)
		return 0;
3209 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3210 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3212 if (drm_rotation_90_or_270(pstate->rotation))
3213 swap(width, height);
3215 /* for planar format */
3216 if (format == DRM_FORMAT_NV12) {
3217 if (y) /* y-plane data rate */
3218 data_rate = width * height *
				    fb->format->cpp[0];
3220 else /* uv-plane data rate */
3221 data_rate = (width / 2) * (height / 2) *
				    fb->format->cpp[1];
	} else {
3224 /* for packed formats */
3225 data_rate = width * height * fb->format->cpp[0];
	}
3228 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
3230 return (uint64_t)data_rate * down_scale_amount >> 16;
3234 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3235 * an 8192x4096@32bpp framebuffer:
3236 * 3 * 4096 * 8192 * 4 < 2^32
3239 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
3240 unsigned *plane_data_rate,
3241 unsigned *plane_y_data_rate)
3243 struct drm_crtc_state *cstate = &intel_cstate->base;
3244 struct drm_atomic_state *state = cstate->state;
3245 struct drm_plane *plane;
3246 const struct drm_plane_state *pstate;
3247 unsigned int total_data_rate = 0;
3249 if (WARN_ON(!state))
		return 0;
3252 /* Calculate and cache data rate for each plane */
3253 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
3254 enum plane_id plane_id = to_intel_plane(plane)->id;
3258 rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
3260 plane_data_rate[plane_id] = rate;
3262 total_data_rate += rate;
3265 rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
3267 plane_y_data_rate[plane_id] = rate;
3269 total_data_rate += rate;
3272 return total_data_rate;
3276 skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
3279 struct drm_framebuffer *fb = pstate->fb;
3280 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3281 uint32_t src_w, src_h;
3282 uint32_t min_scanlines = 8;
3288 /* For packed formats, no y-plane, return 0 */
3289 if (y && fb->format->format != DRM_FORMAT_NV12)
		return 0;
3292 /* For Non Y-tile return 8-blocks */
3293 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
3294 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 8;
3297 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3298 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3300 if (drm_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);
3303 /* Halve UV plane width and height for NV12 */
3304 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}
3309 if (fb->format->format == DRM_FORMAT_NV12 && !y)
3310 plane_bpp = fb->format->cpp[1];
3312 plane_bpp = fb->format->cpp[0];
3314 if (drm_rotation_90_or_270(pstate->rotation)) {
3315 switch (plane_bpp) {
3329 WARN(1, "Unsupported pixel depth %u for rotation",
		     plane_bpp);
3335 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
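/*
 * Worked example (hypothetical plane): a Y-tiled 3840-wide source with
 * plane_bpp = 4 and min_scanlines = 8 yields
 * DIV_ROUND_UP(4 * 3840 * 4, 512) * 8 / 4 + 3 = 120 * 2 + 3 = 243
 * blocks of minimum DDB allocation.
 */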
3339 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
3340 uint16_t *minimum, uint16_t *y_minimum)
3342 const struct drm_plane_state *pstate;
3343 struct drm_plane *plane;
3345 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
3346 enum plane_id plane_id = to_intel_plane(plane)->id;
3348 if (plane_id == PLANE_CURSOR)
3351 if (!pstate->visible)
			continue;
3354 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
3355 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
3358 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
3362 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3363 struct skl_ddb_allocation *ddb /* out */)
3365 struct drm_atomic_state *state = cstate->base.state;
3366 struct drm_crtc *crtc = cstate->base.crtc;
3367 struct drm_device *dev = crtc->dev;
3368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3369 enum pipe pipe = intel_crtc->pipe;
3370 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
3371 uint16_t alloc_size, start;
3372 uint16_t minimum[I915_MAX_PLANES] = {};
3373 uint16_t y_minimum[I915_MAX_PLANES] = {};
3374 unsigned int total_data_rate;
3375 enum plane_id plane_id;
3377 unsigned plane_data_rate[I915_MAX_PLANES] = {};
3378 unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
3380 /* Clear the partitioning for disabled planes. */
3381 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3382 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3384 if (WARN_ON(!state))
		return 0;
3387 if (!cstate->base.active) {
3388 alloc->start = alloc->end = 0;
		return 0;
	}
3392 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3393 alloc_size = skl_ddb_entry_size(alloc);
3394 if (alloc_size == 0) {
3395 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}
3399 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
3402 * 1. Allocate the minimum required blocks for each active plane
3403 * and allocate the cursor, it doesn't require extra allocation
3404 * proportional to the data rate.
3407 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
3408 alloc_size -= minimum[plane_id];
3409 alloc_size -= y_minimum[plane_id];
3412 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
3413 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3416 * 2. Distribute the remaining space in proportion to the amount of
3417 * data each plane needs to fetch from memory.
3419 * FIXME: we may not allocate every single block here.
3421 total_data_rate = skl_get_total_relative_data_rate(cstate,
3424 if (total_data_rate == 0)
		return 0;
3427 start = alloc->start;
3428 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
3429 unsigned int data_rate, y_data_rate;
3430 uint16_t plane_blocks, y_plane_blocks = 0;
3432 if (plane_id == PLANE_CURSOR)
			continue;
3435 data_rate = plane_data_rate[plane_id];
3438 * allocation for (packed formats) or (uv-plane part of planar format):
3439 * promote the expression to 64 bits to avoid overflow; the
3440 * result fits since data_rate / total_data_rate < 1
	 */
3442 plane_blocks = minimum[plane_id];
3443 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);
3446 /* Leave disabled planes at (0,0) */
		if (data_rate) {
3448 ddb->plane[pipe][plane_id].start = start;
3449 ddb->plane[pipe][plane_id].end = start + plane_blocks;
		}
3452 start += plane_blocks;
3455 * allocation for y_plane part of planar format:
3457 y_data_rate = plane_y_data_rate[plane_id];
3459 y_plane_blocks = y_minimum[plane_id];
3460 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
3464 ddb->y_plane[pipe][plane_id].start = start;
3465 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
		}

3468 start += y_plane_blocks;
	}

	return 0;
3475 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3476 * for the read latency) and cpp should always be <= 8, so that
3477 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3478 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3480 static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
					 uint32_t latency)
3483 uint32_t wm_intermediate_val;
3484 uint_fixed_16_16_t ret;
	if (latency == 0)
3487 return FP_16_16_MAX;
3489 wm_intermediate_val = latency * pixel_rate * cpp;
3490 ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);

	return ret;
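/*
 * Method 1 estimates the blocks consumed while a memory request is
 * outstanding: latency (us) * pixel_rate (kHz) * cpp / (1000 * 512).
 * E.g. (illustrative numbers only) latency = 15, pixel_rate = 148500
 * and cpp = 4 give 8910000 / 512000 ~= 17.4 blocks in 16.16 fixed
 * point.
 */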
3494 static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
3495 uint32_t pipe_htotal,
					 uint32_t latency,
3497 uint_fixed_16_16_t plane_blocks_per_line)
3499 uint32_t wm_intermediate_val;
3500 uint_fixed_16_16_t ret;
	if (latency == 0)
3503 return FP_16_16_MAX;
3505 wm_intermediate_val = latency * pixel_rate;
3506 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
3507 pipe_htotal * 1000);
3508 ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);

	return ret;
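/*
 * Method 2 rounds the latency up to whole lines first. With the same
 * illustrative numbers (latency = 15 us, pixel_rate = 148500 kHz,
 * pipe_htotal = 2200): DIV_ROUND_UP(15 * 148500, 2200 * 1000) = 2
 * lines, which is then scaled by plane_blocks_per_line.
 */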
3512 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3513 struct intel_plane_state *pstate)
3515 uint64_t adjusted_pixel_rate;
3516 uint64_t downscale_amount;
3517 uint64_t pixel_rate;
3519 /* Shouldn't reach here on disabled planes... */
3520 if (WARN_ON(!pstate->base.visible))
		return 0;
3524 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3525 * with additional adjustments for plane-specific scaling.
3527 adjusted_pixel_rate = cstate->pixel_rate;
3528 downscale_amount = skl_plane_downscale_amount(pstate);
3530 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3531 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
3536 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3537 struct intel_crtc_state *cstate,
3538 struct intel_plane_state *intel_pstate,
3539 uint16_t ddb_allocation,
				int level,
3541 uint16_t *out_blocks, /* out */
3542 uint8_t *out_lines, /* out */
3543 bool *enabled /* out */)
3545 struct drm_plane_state *pstate = &intel_pstate->base;
3546 struct drm_framebuffer *fb = pstate->fb;
3547 uint32_t latency = dev_priv->wm.skl_latency[level];
3548 uint_fixed_16_16_t method1, method2;
3549 uint_fixed_16_16_t plane_blocks_per_line;
3550 uint_fixed_16_16_t selected_result;
3551 uint32_t interm_pbpl;
3552 uint32_t plane_bytes_per_line;
3553 uint32_t res_blocks, res_lines;
3555 uint32_t width = 0, height = 0;
3556 uint32_t plane_pixel_rate;
3557 uint_fixed_16_16_t y_tile_minimum;
3558 uint32_t y_min_scanlines;
3559 struct intel_atomic_state *state =
3560 to_intel_atomic_state(cstate->base.state);
3561 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
3562 bool y_tiled, x_tiled;
3564 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
		*enabled = false;
		return 0;
	}
3569 y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
3570 fb->modifier == I915_FORMAT_MOD_Yf_TILED;
3571 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
3573 /* Display WA #1141: kbl. */
3574 if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
		latency += 4;
3577 if (apply_memory_bw_wa && x_tiled)
		latency += 15;
3580 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3581 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3583 if (drm_rotation_90_or_270(pstate->rotation))
3584 swap(width, height);
3586 cpp = fb->format->cpp[0];
3587 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3589 if (drm_rotation_90_or_270(pstate->rotation)) {
3590 int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
3591 fb->format->cpp[1] :
			fb->format->cpp[0];

		switch (cpp) {
		case 1:
3596 y_min_scanlines = 16;
			break;
		case 2:
3599 y_min_scanlines = 8;
			break;
		case 4:
3602 y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(cpp);
			return -EINVAL;
		}
	} else {
3609 y_min_scanlines = 4;
	}
3612 if (apply_memory_bw_wa)
3613 y_min_scanlines *= 2;
3615 plane_bytes_per_line = width * cpp;

	if (y_tiled) {
3617 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
3618 y_min_scanlines, 512);
3619 plane_blocks_per_line =
3620 fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
3621 } else if (x_tiled) {
3622 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
3623 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
3625 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
3626 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
3629 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3630 method2 = skl_wm_method2(plane_pixel_rate,
3631 cstate->base.adjusted_mode.crtc_htotal,
3633 plane_blocks_per_line);
3635 y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
3636 plane_blocks_per_line);
	if (y_tiled) {
3639 selected_result = max_fixed_16_16(method2, y_tile_minimum);
	} else {
3641 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
3642 (plane_bytes_per_line / 512 < 1))
3643 selected_result = method2;
3644 else if ((ddb_allocation /
3645 fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
3646 selected_result = min_fixed_16_16(method1, method2);
		else
3648 selected_result = method1;
	}
3651 res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
3652 res_lines = DIV_ROUND_UP(selected_result.val,
3653 plane_blocks_per_line.val);
3655 if (level >= 1 && level <= 7) {
		if (y_tiled) {
3657 res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
3658 res_lines += y_min_scanlines;
		} else {
			res_blocks++;
		}
	}
3664 if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;
3668 * If there are no valid level 0 watermarks, then we can't
3669 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
3674 struct drm_plane *plane = pstate->plane;
3676 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3677 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
3678 plane->base.id, plane->name,
3679 res_blocks, ddb_allocation, res_lines);

			return -EINVAL;
		}
	}
3684 *out_blocks = res_blocks;
3685 *out_lines = res_lines;
	*enabled = true;

	return 0;
3692 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3693 struct skl_ddb_allocation *ddb,
3694 struct intel_crtc_state *cstate,
3695 struct intel_plane *intel_plane,
		    int level,
3697 struct skl_wm_level *result)
3699 struct drm_atomic_state *state = cstate->base.state;
3700 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3701 struct drm_plane *plane = &intel_plane->base;
3702 struct intel_plane_state *intel_pstate = NULL;
3703 uint16_t ddb_blocks;
3704 enum pipe pipe = intel_crtc->pipe;
	if (state)
		intel_pstate =
3709 intel_atomic_get_existing_plane_state(state,
						      intel_plane);
3713 * Note: If we start supporting multiple pending atomic commits against
3714 * the same planes/CRTC's in the future, plane->state will no longer be
3715 * the correct pre-state to use for the calculations here and we'll
3716 * need to change where we get the 'unchanged' plane data from.
3718 * For now this is fine because we only allow one queued commit against
3719 * a CRTC. Even if the plane isn't modified by this transaction and we
3720 * don't have a plane lock, we still have the CRTC's lock, so we know
3721 * that no other transactions are racing with us to update it.
	if (!intel_pstate)
3724 intel_pstate = to_intel_plane_state(plane->state);
3726 WARN_ON(!intel_pstate->base.fb);
3728 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
3730 ret = skl_compute_plane_wm(dev_priv,
				   cstate,
				   intel_pstate,
				   ddb_blocks,
				   level,
3735 &result->plane_res_b,
3736 &result->plane_res_l,
				   &result->plane_en);
	if (ret)
		return ret;

	return 0;
3745 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3747 struct drm_atomic_state *state = cstate->base.state;
3748 struct drm_i915_private *dev_priv = to_i915(state->dev);
3749 uint32_t pixel_rate;
3750 uint32_t linetime_wm;
3752 if (!cstate->base.active)
		return 0;
3755 pixel_rate = cstate->pixel_rate;
3757 if (WARN_ON(pixel_rate == 0))
		return 0;

3760 linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
				   1000, pixel_rate);
3763 /* Display WA #1135: bxt. */
3764 if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
3765 linetime_wm = DIV_ROUND_UP(linetime_wm, 2);

	return linetime_wm;
3770 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3771 struct skl_wm_level *trans_wm /* out */)
3773 if (!cstate->base.active)
		return;
3776 /* Until we know more, just disable transition WMs */
3777 trans_wm->plane_en = false;
3780 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3781 struct skl_ddb_allocation *ddb,
3782 struct skl_pipe_wm *pipe_wm)
3784 struct drm_device *dev = cstate->base.crtc->dev;
3785 const struct drm_i915_private *dev_priv = to_i915(dev);
3786 struct intel_plane *intel_plane;
3787 struct skl_plane_wm *wm;
3788 int level, max_level = ilk_wm_max_level(dev_priv);
3792 * We'll only calculate watermarks for planes that are actually
3793 * enabled, so make sure all other planes are set as disabled.
3795 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
3797 for_each_intel_plane_mask(&dev_priv->drm,
3799 cstate->base.plane_mask) {
3800 wm = &pipe_wm->planes[intel_plane->id];
3802 for (level = 0; level <= max_level; level++) {
3803 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
						   intel_plane, level,
						   &wm->wm[level]);
			if (ret)
				return ret;
		}
3809 skl_compute_transition_wm(cstate, &wm->trans_wm);
	}

3811 pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
3816 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3818 const struct skl_ddb_entry *entry)
	if (entry->end)
3821 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
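/*
 * Example: a [256, 512) entry is written as (511 << 16) | 256 =
 * 0x01ff0100; skl_ddb_entry_init_from_hw() reverses this by adding 1
 * to the inclusive end read back from the register.
 */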
3826 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
3828 const struct skl_wm_level *level)
	uint32_t val = 0;

3832 if (level->plane_en) {
		val |= PLANE_WM_EN;
3834 val |= level->plane_res_b;
3835 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}
3838 I915_WRITE(reg, val);
3841 static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
3842 const struct skl_plane_wm *wm,
3843 const struct skl_ddb_allocation *ddb,
3844 enum plane_id plane_id)
3846 struct drm_crtc *crtc = &intel_crtc->base;
3847 struct drm_device *dev = crtc->dev;
3848 struct drm_i915_private *dev_priv = to_i915(dev);
3849 int level, max_level = ilk_wm_max_level(dev_priv);
3850 enum pipe pipe = intel_crtc->pipe;
3852 for (level = 0; level <= max_level; level++) {
3853 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
3856 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);
3859 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
3860 &ddb->plane[pipe][plane_id]);
3861 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
3862 &ddb->y_plane[pipe][plane_id]);
3865 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
3866 const struct skl_plane_wm *wm,
3867 const struct skl_ddb_allocation *ddb)
3869 struct drm_crtc *crtc = &intel_crtc->base;
3870 struct drm_device *dev = crtc->dev;
3871 struct drm_i915_private *dev_priv = to_i915(dev);
3872 int level, max_level = ilk_wm_max_level(dev_priv);
3873 enum pipe pipe = intel_crtc->pipe;
3875 for (level = 0; level <= max_level; level++) {
3876 skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
3879 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
3881 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3882 &ddb->plane[pipe][PLANE_CURSOR]);
3885 bool skl_wm_level_equals(const struct skl_wm_level *l1,
3886 const struct skl_wm_level *l2)
3888 if (l1->plane_en != l2->plane_en)
		return false;

3891 /* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;
3895 return (l1->plane_res_l == l2->plane_res_l &&
3896 l1->plane_res_b == l2->plane_res_b);
3899 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
3900 const struct skl_ddb_entry *b)
3902 return a->start < b->end && b->start < a->end;
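/*
 * DDB entries are half-open [start, end), so e.g. [0, 256) and
 * [256, 512) do not overlap while [0, 257) and [256, 512) do.
 */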
3905 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
3906 const struct skl_ddb_entry *ddb,
3911 for (i = 0; i < I915_MAX_PIPES; i++)
3912 if (i != ignore && entries[i] &&
3913 skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
3919 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3920 const struct skl_pipe_wm *old_pipe_wm,
3921 struct skl_pipe_wm *pipe_wm, /* out */
3922 struct skl_ddb_allocation *ddb, /* out */
3923 bool *changed /* out */)
3925 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3928 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

3932 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
3941 pipes_modified(struct drm_atomic_state *state)
3943 struct drm_crtc *crtc;
3944 struct drm_crtc_state *cstate;
3945 uint32_t i, ret = 0;
3947 for_each_crtc_in_state(state, crtc, cstate, i)
3948 ret |= drm_crtc_mask(crtc);

	return ret;
3954 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
3956 struct drm_atomic_state *state = cstate->base.state;
3957 struct drm_device *dev = state->dev;
3958 struct drm_crtc *crtc = cstate->base.crtc;
3959 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3960 struct drm_i915_private *dev_priv = to_i915(dev);
3961 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3962 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
3963 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3964 struct drm_plane_state *plane_state;
3965 struct drm_plane *plane;
3966 enum pipe pipe = intel_crtc->pipe;
3968 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
3970 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
3971 enum plane_id plane_id = to_intel_plane(plane)->id;
3973 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
3974 &new_ddb->plane[pipe][plane_id]) &&
3975 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
3976 &new_ddb->y_plane[pipe][plane_id]))
			continue;
3979 plane_state = drm_atomic_get_plane_state(state, plane);
3980 if (IS_ERR(plane_state))
3981 return PTR_ERR(plane_state);
	}

	return 0;
3988 skl_compute_ddb(struct drm_atomic_state *state)
3990 struct drm_device *dev = state->dev;
3991 struct drm_i915_private *dev_priv = to_i915(dev);
3992 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3993 struct intel_crtc *intel_crtc;
3994 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3995 uint32_t realloc_pipes = pipes_modified(state);
	int ret;
3999 * If this is our first atomic update following hardware readout,
4000 * we can't trust the DDB that the BIOS programmed for us. Let's
4001 * pretend that all pipes switched active status so that we'll
4002 * ensure a full DDB recompute.
4004 if (dev_priv->wm.distrust_bios_wm) {
4005 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4006 state->acquire_ctx);
		if (ret)
			return ret;
4010 intel_state->active_pipe_changes = ~0;
4013 * We usually only initialize intel_state->active_crtcs if we're
4014 * doing a modeset; make sure this field is always
4015 * initialized during the sanitization process that happens
4016 * on the first commit too.
4018 if (!intel_state->modeset)
4019 intel_state->active_crtcs = dev_priv->active_crtcs;
4023 * If the modeset changes which CRTC's are active, we need to
4024 * recompute the DDB allocation for *all* active pipes, even
4025 * those that weren't otherwise being modified in any way by this
4026 * atomic commit. Due to the shrinking of the per-pipe allocations
4027 * when new active CRTC's are added, it's possible for a pipe that
4028 * we were already using and aren't changing at all here to suddenly
4029 * become invalid if its DDB needs exceeds its new allocation.
4031 * Note that if we wind up doing a full DDB recompute, we can't let
4032 * any other display updates race with this transaction, so we need
4033 * to grab the lock on *all* CRTC's.
4035 if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
4037 intel_state->wm_results.dirty_pipes = ~0;
	}
4041 * We're not recomputing for the pipes not included in the commit, so
4042 * make sure we start with the current state.
4044 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
4046 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
4047 struct intel_crtc_state *cstate;
4049 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
4051 return PTR_ERR(cstate);
4053 ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

4057 ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
4066 skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4067 struct skl_wm_values *src,
4070 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4071 sizeof(dst->ddb.y_plane[pipe]));
4072 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4073 sizeof(dst->ddb.plane[pipe]));
4077 skl_print_wm_changes(const struct drm_atomic_state *state)
4079 const struct drm_device *dev = state->dev;
4080 const struct drm_i915_private *dev_priv = to_i915(dev);
4081 const struct intel_atomic_state *intel_state =
4082 to_intel_atomic_state(state);
4083 const struct drm_crtc *crtc;
4084 const struct drm_crtc_state *cstate;
4085 const struct intel_plane *intel_plane;
4086 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
4087 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
4090 for_each_crtc_in_state(state, crtc, cstate, i) {
4091 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4092 enum pipe pipe = intel_crtc->pipe;
4094 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
4095 enum plane_id plane_id = intel_plane->id;
4096 const struct skl_ddb_entry *old, *new;
4098 old = &old_ddb->plane[pipe][plane_id];
4099 new = &new_ddb->plane[pipe][plane_id];
4101 if (skl_ddb_entry_equal(old, new))
4104 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
4105 intel_plane->base.base.id,
4106 intel_plane->base.name,
4107 old->start, old->end,
4108 new->start, new->end);
4114 skl_compute_wm(struct drm_atomic_state *state)
4116 struct drm_crtc *crtc;
4117 struct drm_crtc_state *cstate;
4118 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4119 struct skl_wm_values *results = &intel_state->wm_results;
4120 struct skl_pipe_wm *pipe_wm;
4121 bool changed = false;
4125 * If this transaction isn't actually touching any CRTC's, don't
4126 * bother with watermark calculation. Note that if we pass this
4127 * test, we're guaranteed to hold at least one CRTC state mutex,
4128 * which means we can safely use values like dev_priv->active_crtcs
4129 * since any racing commits that want to update them would need to
4130 * hold _all_ CRTC state mutexes.
4132 for_each_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;
4137 /* Clear all dirty flags */
4138 results->dirty_pipes = 0;
4140 ret = skl_compute_ddb(state);
	if (ret)
		return ret;
4145 * Calculate WM's for all pipes that are part of this transaction.
4146 * Note that the DDB allocation above may have added more CRTC's that
4147 * weren't otherwise being modified (and set bits in dirty_pipes) if
4148 * pipe allocations had to change.
4150 * FIXME: Now that we're doing this in the atomic check phase, we
4151 * should allow skl_update_pipe_wm() to return failure in cases where
4152 * no suitable watermark values can be found.
4154 for_each_crtc_in_state(state, crtc, cstate, i) {
4155 struct intel_crtc_state *intel_cstate =
4156 to_intel_crtc_state(cstate);
4157 const struct skl_pipe_wm *old_pipe_wm =
4158 &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
4160 pipe_wm = &intel_cstate->wm.skl.optimal;
4161 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
4162 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
4167 results->dirty_pipes |= drm_crtc_mask(crtc);
4169 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4170 /* This pipe's WM's did not change */
			continue;
4173 intel_cstate->update_wm_pre = true;
4176 skl_print_wm_changes(state);

	return 0;
4181 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
4182 struct intel_crtc_state *cstate)
4184 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
4185 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4186 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4187 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
4188 enum pipe pipe = crtc->pipe;
4189 enum plane_id plane_id;
4191 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
4192 return;
4194 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
4196 for_each_plane_id_on_crtc(crtc, plane_id) {
4197 if (plane_id != PLANE_CURSOR)
4198 skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
4199 ddb, plane_id);
4200 else
4201 skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
4202 ddb);
4206 static void skl_initial_wm(struct intel_atomic_state *state,
4207 struct intel_crtc_state *cstate)
4209 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4210 struct drm_device *dev = intel_crtc->base.dev;
4211 struct drm_i915_private *dev_priv = to_i915(dev);
4212 struct skl_wm_values *results = &state->wm_results;
4213 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4214 enum pipe pipe = intel_crtc->pipe;
4216 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
4217 return;
4219 mutex_lock(&dev_priv->wm.wm_mutex);
4221 if (cstate->base.active_changed)
4222 skl_atomic_update_crtc_wm(state, cstate);
4224 skl_copy_wm_for_pipe(hw_vals, results, pipe);
4226 mutex_unlock(&dev_priv->wm.wm_mutex);
4229 static void ilk_compute_wm_config(struct drm_device *dev,
4230 struct intel_wm_config *config)
4232 struct intel_crtc *crtc;
4234 /* Compute the currently _active_ config */
4235 for_each_intel_crtc(dev, crtc) {
4236 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
4238 if (!wm->pipe_enabled)
4239 continue;
4241 config->sprites_enabled |= wm->sprites_enabled;
4242 config->sprites_scaled |= wm->sprites_scaled;
4243 config->num_pipes_active++;
4247 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4249 struct drm_device *dev = &dev_priv->drm;
4250 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4251 struct ilk_wm_maximums max;
4252 struct intel_wm_config config = {};
4253 struct ilk_wm_values results = {};
4254 enum intel_ddb_partitioning partitioning;
4256 ilk_compute_wm_config(dev, &config);
4258 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4259 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
4261 /* 5/6 split only in single pipe config on IVB+ */
4262 if (INTEL_GEN(dev_priv) >= 7 &&
4263 config.num_pipes_active == 1 && config.sprites_enabled) {
4264 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
4265 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
4267 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
4268 } else {
4269 best_lp_wm = &lp_wm_1_2;
4272 partitioning = (best_lp_wm == &lp_wm_1_2) ?
4273 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
4275 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
4277 ilk_write_wm_values(dev_priv, &results);
4280 static void ilk_initial_watermarks(struct intel_atomic_state *state,
4281 struct intel_crtc_state *cstate)
4283 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4284 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4286 mutex_lock(&dev_priv->wm.wm_mutex);
4287 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
4288 ilk_program_watermarks(dev_priv);
4289 mutex_unlock(&dev_priv->wm.wm_mutex);
4292 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
4293 struct intel_crtc_state *cstate)
4295 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4296 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4298 mutex_lock(&dev_priv->wm.wm_mutex);
4299 if (cstate->wm.need_postvbl_update) {
4300 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4301 ilk_program_watermarks(dev_priv);
4303 mutex_unlock(&dev_priv->wm.wm_mutex);
4306 static inline void skl_wm_level_from_reg_val(uint32_t val,
4307 struct skl_wm_level *level)
4309 level->plane_en = val & PLANE_WM_EN;
4310 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
4311 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
4312 PLANE_WM_LINES_MASK;
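/*
 * Worked decode (illustrative raw value only; the PLANE_WM_* macros in
 * i915_reg.h define the authoritative field layout): a register value
 * with PLANE_WM_EN set, 4 in the lines field and 32 in the blocks field
 * decodes to plane_en = true, plane_res_l = 4 and plane_res_b = 32.
 */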
4315 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
4316 struct skl_pipe_wm *out)
4318 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4320 enum pipe pipe = intel_crtc->pipe;
4321 int level, max_level;
4322 enum plane_id plane_id;
4323 uint32_t val;
4325 max_level = ilk_wm_max_level(dev_priv);
4327 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4328 struct skl_plane_wm *wm = &out->planes[plane_id];
4330 for (level = 0; level <= max_level; level++) {
4331 if (plane_id != PLANE_CURSOR)
4332 val = I915_READ(PLANE_WM(pipe, plane_id, level));
4333 else
4334 val = I915_READ(CUR_WM(pipe, level));
4336 skl_wm_level_from_reg_val(val, &wm->wm[level]);
4339 if (plane_id != PLANE_CURSOR)
4340 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
4341 else
4342 val = I915_READ(CUR_WM_TRANS(pipe));
4344 skl_wm_level_from_reg_val(val, &wm->trans_wm);
4347 if (!intel_crtc->active)
4348 return;
4350 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
4353 void skl_wm_get_hw_state(struct drm_device *dev)
4355 struct drm_i915_private *dev_priv = to_i915(dev);
4356 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4357 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4358 struct drm_crtc *crtc;
4359 struct intel_crtc *intel_crtc;
4360 struct intel_crtc_state *cstate;
4362 skl_ddb_get_hw_state(dev_priv, ddb);
4363 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4364 intel_crtc = to_intel_crtc(crtc);
4365 cstate = to_intel_crtc_state(crtc->state);
4367 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
4369 if (intel_crtc->active)
4370 hw->dirty_pipes |= drm_crtc_mask(crtc);
4373 if (dev_priv->active_crtcs) {
4374 /* Fully recompute DDB on first atomic commit */
4375 dev_priv->wm.distrust_bios_wm = true;
4376 } else {
4377 /* Easy/common case; just sanitize DDB now if everything off */
4378 memset(ddb, 0, sizeof(*ddb));
4382 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4384 struct drm_device *dev = crtc->dev;
4385 struct drm_i915_private *dev_priv = to_i915(dev);
4386 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4387 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4388 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4389 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4390 enum pipe pipe = intel_crtc->pipe;
4391 static const i915_reg_t wm0_pipe_reg[] = {
4392 [PIPE_A] = WM0_PIPEA_ILK,
4393 [PIPE_B] = WM0_PIPEB_ILK,
4394 [PIPE_C] = WM0_PIPEC_IVB,
4397 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4398 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4399 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4401 memset(active, 0, sizeof(*active));
4403 active->pipe_enabled = intel_crtc->active;
4405 if (active->pipe_enabled) {
4406 u32 tmp = hw->wm_pipe[pipe];
4409 * For active pipes LP0 watermark is marked as
4410 * enabled, and LP1+ watermarks as disabled since
4411 * we can't really reverse compute them in case
4412 * multiple pipes are active.
4414 active->wm[0].enable = true;
4415 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4416 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4417 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4418 active->linetime = hw->wm_linetime[pipe];
4419 } else {
4420 int level, max_level = ilk_wm_max_level(dev_priv);
4423 * For inactive pipes, all watermark levels
4424 * should be marked as enabled but zeroed,
4425 * which is what we'd compute them to.
4427 for (level = 0; level <= max_level; level++)
4428 active->wm[level].enable = true;
4431 intel_crtc->wm.active.ilk = *active;
4434 #define _FW_WM(value, plane) \
4435 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4436 #define _FW_WM_VLV(value, plane) \
4437 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
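/*
 * Illustrative expansion (pure token pasting, no new behaviour):
 * _FW_WM(tmp, CURSORB) becomes
 * ((tmp & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT),
 * i.e. each macro extracts one named watermark field from a DSPFW value.
 */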
4439 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4440 struct vlv_wm_values *wm)
4442 enum pipe pipe;
4443 uint32_t tmp;
4445 for_each_pipe(dev_priv, pipe) {
4446 tmp = I915_READ(VLV_DDL(pipe));
4448 wm->ddl[pipe].plane[PLANE_PRIMARY] =
4449 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4450 wm->ddl[pipe].plane[PLANE_CURSOR] =
4451 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4452 wm->ddl[pipe].plane[PLANE_SPRITE0] =
4453 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4454 wm->ddl[pipe].plane[PLANE_SPRITE1] =
4455 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4458 tmp = I915_READ(DSPFW1);
4459 wm->sr.plane = _FW_WM(tmp, SR);
4460 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
4461 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
4462 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
4464 tmp = I915_READ(DSPFW2);
4465 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
4466 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
4467 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
4469 tmp = I915_READ(DSPFW3);
4470 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4472 if (IS_CHERRYVIEW(dev_priv)) {
4473 tmp = I915_READ(DSPFW7_CHV);
4474 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4475 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4477 tmp = I915_READ(DSPFW8_CHV);
4478 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
4479 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
4481 tmp = I915_READ(DSPFW9_CHV);
4482 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
4483 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
4485 tmp = I915_READ(DSPHOWM);
4486 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4487 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4488 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4489 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
4490 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4491 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4492 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4493 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4494 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4495 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
4496 } else {
4497 tmp = I915_READ(DSPFW7);
4498 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4499 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4501 tmp = I915_READ(DSPHOWM);
4502 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4503 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4504 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4505 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4506 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4507 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4508 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
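/*
 * Note on the |= ... << 8 pattern above: the DSPFW fields are only
 * 8 bits wide (9 for SR), so the extra most-significant bit of each
 * watermark lives in DSPHOWM; e.g. the final primary plane A value is
 * assembled as (PLANEA_HI << 8) | PLANEA.
 */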
4515 void vlv_wm_get_hw_state(struct drm_device *dev)
4517 struct drm_i915_private *dev_priv = to_i915(dev);
4518 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4519 struct intel_crtc *crtc;
4520 enum pipe pipe;
4521 u32 val;
4523 vlv_read_wm_values(dev_priv, wm);
4525 for_each_intel_crtc(dev, crtc)
4526 vlv_get_fifo_size(to_intel_crtc_state(crtc->base.state));
4528 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4529 wm->level = VLV_WM_LEVEL_PM2;
4531 if (IS_CHERRYVIEW(dev_priv)) {
4532 mutex_lock(&dev_priv->rps.hw_lock);
4534 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4535 if (val & DSP_MAXFIFO_PM5_ENABLE)
4536 wm->level = VLV_WM_LEVEL_PM5;
4539 * If DDR DVFS is disabled in the BIOS, Punit
4540 * will never ack the request. So if that happens
4541 * assume we don't have to enable/disable DDR DVFS
4542 * dynamically. To test that just set the REQ_ACK
4543 * bit to poke the Punit, but don't change the
4544 * HIGH/LOW bits so that we don't actually change
4545 * the current state.
4547 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4548 val |= FORCE_DDR_FREQ_REQ_ACK;
4549 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4551 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4552 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4553 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4554 "assuming DDR DVFS is disabled\n");
4555 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4556 } else {
4557 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4558 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4559 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4562 mutex_unlock(&dev_priv->rps.hw_lock);
4565 for_each_pipe(dev_priv, pipe)
4566 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4568 wm->pipe[pipe].plane[PLANE_PRIMARY],
4569 wm->pipe[pipe].plane[PLANE_CURSOR],
4570 wm->pipe[pipe].plane[PLANE_SPRITE0],
4571 wm->pipe[pipe].plane[PLANE_SPRITE1]);
4573 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4574 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4577 void ilk_wm_get_hw_state(struct drm_device *dev)
4579 struct drm_i915_private *dev_priv = to_i915(dev);
4580 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4581 struct drm_crtc *crtc;
4583 for_each_crtc(dev, crtc)
4584 ilk_pipe_wm_get_hw_state(crtc);
4586 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4587 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4588 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4590 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4591 if (INTEL_GEN(dev_priv) >= 7) {
4592 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4593 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4596 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4597 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4598 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4599 else if (IS_IVYBRIDGE(dev_priv))
4600 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4601 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4603 hw->enable_fbc_wm =
4604 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4608 * intel_update_watermarks - update FIFO watermark values based on current modes
4610 * Calculate watermark values for the various WM regs based on current mode
4611 * and plane configuration.
4613 * There are several cases to deal with here:
4614 * - normal (i.e. non-self-refresh)
4615 * - self-refresh (SR) mode
4616 * - lines are large relative to FIFO size (buffer can hold up to 2)
4617 * - lines are small relative to FIFO size (buffer can hold more than 2
4618 * lines), so need to account for TLB latency
4620 * The normal calculation is:
4621 * watermark = dotclock * bytes per pixel * latency
4622 * where latency is platform & configuration dependent (we assume pessimal
4623 * values).
4625 * The SR calculation is:
4626 * watermark = (trunc(latency/line time)+1) * surface width *
4627 * bytes per pixel
4628 * where
4629 * line time = htotal / dotclock
4630 * surface width = hdisplay for normal plane and 64 for cursor
4631 * and latency is assumed to be high, as above.
4633 * The final value programmed to the register should always be rounded up,
4634 * and include an extra 2 entries to account for clock crossings.
4636 * We don't use the sprite, so we can ignore that. And on Crestline we have
4637 * to set the non-SR watermarks to 8.
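*
* Worked example (illustrative numbers, not from any spec): with a
* 100 MHz dotclock, 4 bytes per pixel and an assumed 2 us latency, the
* normal watermark is 100e6 * 4 * 2e-6 = 800 bytes; with 64-byte FIFO
* entries that is 12.5, rounded up to 13 entries, plus 2 for clock
* crossings, so 15 would be programmed.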
4639 void intel_update_watermarks(struct intel_crtc *crtc)
4641 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4643 if (dev_priv->display.update_wm)
4644 dev_priv->display.update_wm(crtc);
4648 * Lock protecting IPS related data structures
4650 DEFINE_SPINLOCK(mchdev_lock);
4652 /* Global for IPS driver to get at the current i915 device. Protected by
4653 * mchdev_lock. */
4654 static struct drm_i915_private *i915_mch_dev;
4656 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4658 u16 rgvswctl;
4660 lockdep_assert_held(&mchdev_lock);
4662 rgvswctl = I915_READ16(MEMSWCTL);
4663 if (rgvswctl & MEMCTL_CMD_STS) {
4664 DRM_DEBUG("gpu busy, RCS change rejected\n");
4665 return false; /* still busy with another command */
4668 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4669 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4670 I915_WRITE16(MEMSWCTL, rgvswctl);
4671 POSTING_READ16(MEMSWCTL);
4673 rgvswctl |= MEMCTL_CMD_STS;
4674 I915_WRITE16(MEMSWCTL, rgvswctl);
4676 return true;
4679 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4681 u32 rgvmodectl;
4682 u8 fmax, fmin, fstart, vstart;
4684 spin_lock_irq(&mchdev_lock);
4686 rgvmodectl = I915_READ(MEMMODECTL);
4688 /* Enable temp reporting */
4689 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4690 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4692 /* 100ms RC evaluation intervals */
4693 I915_WRITE(RCUPEI, 100000);
4694 I915_WRITE(RCDNEI, 100000);
4696 /* Set max/min thresholds to 90ms and 80ms respectively */
4697 I915_WRITE(RCBMAXAVG, 90000);
4698 I915_WRITE(RCBMINAVG, 80000);
4700 I915_WRITE(MEMIHYST, 1);
4702 /* Set up min, max, and cur for interrupt handling */
4703 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4704 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4705 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4706 MEMMODE_FSTART_SHIFT;
4708 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4709 PXVFREQ_PX_SHIFT;
4711 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4712 dev_priv->ips.fstart = fstart;
4714 dev_priv->ips.max_delay = fstart;
4715 dev_priv->ips.min_delay = fmin;
4716 dev_priv->ips.cur_delay = fstart;
4718 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4719 fmax, fmin, fstart);
4721 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4724 * Interrupts will be enabled in ironlake_irq_postinstall
4727 I915_WRITE(VIDSTART, vstart);
4728 POSTING_READ(VIDSTART);
4730 rgvmodectl |= MEMMODE_SWMODE_EN;
4731 I915_WRITE(MEMMODECTL, rgvmodectl);
4733 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4734 DRM_ERROR("stuck trying to change perf mode\n");
4737 ironlake_set_drps(dev_priv, fstart);
4739 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4740 I915_READ(DDREC) + I915_READ(CSIEC);
4741 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4742 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4743 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4745 spin_unlock_irq(&mchdev_lock);
4748 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4750 u16 rgvswctl;
4752 spin_lock_irq(&mchdev_lock);
4754 rgvswctl = I915_READ16(MEMSWCTL);
4756 /* Ack interrupts, disable EFC interrupt */
4757 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4758 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4759 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4760 I915_WRITE(DEIIR, DE_PCU_EVENT);
4761 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4763 /* Go back to the starting frequency */
4764 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4766 rgvswctl |= MEMCTL_CMD_STS;
4767 I915_WRITE(MEMSWCTL, rgvswctl);
4768 mdelay(1);
4770 spin_unlock_irq(&mchdev_lock);
4773 /* There's a funny hw issue where the hw returns all 0 when reading from
4774 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4775 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4776 * all limits and the gpu stuck at whatever frequency it is at atm).
4778 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4780 u32 limits;
4782 /* Only set the down limit when we've reached the lowest level to avoid
4783 * getting more interrupts, otherwise leave this clear. This prevents a
4784 * race in the hw when coming out of rc6: There's a tiny window where
4785 * the hw runs at the minimal clock before selecting the desired
4786 * frequency, if the down threshold expires in that window we will not
4787 * receive a down interrupt. */
4788 if (IS_GEN9(dev_priv)) {
4789 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4790 if (val <= dev_priv->rps.min_freq_softlimit)
4791 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4792 } else {
4793 limits = dev_priv->rps.max_freq_softlimit << 24;
4794 if (val <= dev_priv->rps.min_freq_softlimit)
4795 limits |= dev_priv->rps.min_freq_softlimit << 16;
4796 }
4798 return limits;
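/*
 * Illustrative encoding (hypothetical softlimit values): on gen6-8 with
 * max_freq_softlimit = 0x11 and val at the min softlimit of 0x05, the
 * computed word is (0x11 << 24) | (0x05 << 16) = 0x11050000; gen9 packs
 * the same two fields at bits 23 and 14 instead.
 */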
4801 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4803 int new_power;
4804 u32 threshold_up = 0, threshold_down = 0; /* in % */
4805 u32 ei_up = 0, ei_down = 0;
4807 new_power = dev_priv->rps.power;
4808 switch (dev_priv->rps.power) {
4809 case LOW_POWER:
4810 if (val > dev_priv->rps.efficient_freq + 1 &&
4811 val > dev_priv->rps.cur_freq)
4812 new_power = BETWEEN;
4813 break;
4815 case BETWEEN:
4816 if (val <= dev_priv->rps.efficient_freq &&
4817 val < dev_priv->rps.cur_freq)
4818 new_power = LOW_POWER;
4819 else if (val >= dev_priv->rps.rp0_freq &&
4820 val > dev_priv->rps.cur_freq)
4821 new_power = HIGH_POWER;
4822 break;
4824 case HIGH_POWER:
4825 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4826 val < dev_priv->rps.cur_freq)
4827 new_power = BETWEEN;
4828 break;
4830 /* Max/min bins are special */
4831 if (val <= dev_priv->rps.min_freq_softlimit)
4832 new_power = LOW_POWER;
4833 if (val >= dev_priv->rps.max_freq_softlimit)
4834 new_power = HIGH_POWER;
4835 if (new_power == dev_priv->rps.power)
4836 return;
4838 /* Note the units here are not exactly 1us, but 1280ns. */
4839 switch (new_power) {
4840 case LOW_POWER:
4841 /* Upclock if more than 95% busy over 16ms */
4842 ei_up = 16000;
4843 threshold_up = 95;
4845 /* Downclock if less than 85% busy over 32ms */
4846 ei_down = 32000;
4847 threshold_down = 85;
4848 break;
4850 case BETWEEN:
4851 /* Upclock if more than 90% busy over 13ms */
4852 ei_up = 13000;
4853 threshold_up = 90;
4855 /* Downclock if less than 75% busy over 32ms */
4856 ei_down = 32000;
4857 threshold_down = 75;
4858 break;
4860 case HIGH_POWER:
4861 /* Upclock if more than 85% busy over 10ms */
4862 ei_up = 10000;
4863 threshold_up = 85;
4865 /* Downclock if less than 60% busy over 32ms */
4866 ei_down = 32000;
4867 threshold_down = 60;
4868 break;
4869 }
4871 /* When BYT can survive dynamic sw freq adjustments without
4872 * hanging the system, this restriction can be lifted.
4874 if (IS_VALLEYVIEW(dev_priv))
4875 goto skip_hw_write;
4877 I915_WRITE(GEN6_RP_UP_EI,
4878 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4879 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4880 GT_INTERVAL_FROM_US(dev_priv,
4881 ei_up * threshold_up / 100));
4883 I915_WRITE(GEN6_RP_DOWN_EI,
4884 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4885 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4886 GT_INTERVAL_FROM_US(dev_priv,
4887 ei_down * threshold_down / 100));
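/*
 * Illustrative arithmetic for the two writes above (LOW_POWER numbers):
 * ei_up = 16000us with threshold_up = 95 programs an up threshold of
 * 16000 * 95 / 100 = 15200us of busy time per 16ms evaluation window,
 * with GT_INTERVAL_FROM_US() converting from us to GT clocks.
 */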
4889 I915_WRITE(GEN6_RP_CONTROL,
4890 GEN6_RP_MEDIA_TURBO |
4891 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4892 GEN6_RP_MEDIA_IS_GFX |
4893 GEN6_RP_ENABLE |
4894 GEN6_RP_UP_BUSY_AVG |
4895 GEN6_RP_DOWN_IDLE_AVG);
4897 skip_hw_write:
4898 dev_priv->rps.power = new_power;
4899 dev_priv->rps.up_threshold = threshold_up;
4900 dev_priv->rps.down_threshold = threshold_down;
4901 dev_priv->rps.last_adj = 0;
4904 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4906 u32 mask = 0;
4908 if (val > dev_priv->rps.min_freq_softlimit)
4909 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4910 if (val < dev_priv->rps.max_freq_softlimit)
4911 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4913 mask &= dev_priv->pm_rps_events;
4915 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4918 /* gen6_set_rps is called to update the frequency request, but should also be
4919 * called when the range (min_delay and max_delay) is modified so that we can
4920 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4921 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4923 /* min/max delay may still have been modified so be sure to
4924 * write the limits value.
4926 if (val != dev_priv->rps.cur_freq) {
4927 gen6_set_rps_thresholds(dev_priv, val);
4929 if (IS_GEN9(dev_priv))
4930 I915_WRITE(GEN6_RPNSWREQ,
4931 GEN9_FREQUENCY(val));
4932 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4933 I915_WRITE(GEN6_RPNSWREQ,
4934 HSW_FREQUENCY(val));
4936 I915_WRITE(GEN6_RPNSWREQ,
4937 GEN6_FREQUENCY(val) |
4938 GEN6_OFFSET(0) |
4939 GEN6_AGGRESSIVE_TURBO);
4942 /* Make sure we continue to get interrupts
4943 * until we hit the minimum or maximum frequencies.
4945 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4946 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4948 dev_priv->rps.cur_freq = val;
4949 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4951 return 0;
4954 static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4956 int err;
4958 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4959 "Odd GPU freq value\n"))
4960 val &= ~1;
4962 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4964 if (val != dev_priv->rps.cur_freq) {
4965 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4966 if (err)
4967 return err;
4969 gen6_set_rps_thresholds(dev_priv, val);
4972 dev_priv->rps.cur_freq = val;
4973 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4975 return 0;
4978 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4980 * * If Gfx is Idle, then
4981 * 1. Forcewake Media well.
4982 * 2. Request idle freq.
4983 * 3. Release Forcewake of Media well.
4985 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4987 u32 val = dev_priv->rps.idle_freq;
4988 int err;
4990 if (dev_priv->rps.cur_freq <= val)
4991 return;
4993 /* The punit delays the write of the frequency and voltage until it
4994 * determines the GPU is awake. During normal usage we don't want to
4995 * waste power changing the frequency if the GPU is sleeping (rc6).
4996 * However, the GPU and driver is now idle and we do not want to delay
4997 * switching to minimum voltage (reducing power whilst idle) as we do
4998 * not expect to be woken in the near future and so must flush the
4999 * change by waking the device.
5001 * We choose to take the media powerwell (either would do to trick the
5002 * punit into committing the voltage change) as that takes a lot less
5003 * power than the render powerwell.
5005 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
5006 err = valleyview_set_rps(dev_priv, val);
5007 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
5010 DRM_ERROR("Failed to set RPS for idle\n");
5013 void gen6_rps_busy(struct drm_i915_private *dev_priv)
5015 mutex_lock(&dev_priv->rps.hw_lock);
5016 if (dev_priv->rps.enabled) {
5017 u8 freq;
5019 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
5020 gen6_rps_reset_ei(dev_priv);
5021 I915_WRITE(GEN6_PMINTRMSK,
5022 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
5024 gen6_enable_rps_interrupts(dev_priv);
5026 /* Use the user's desired frequency as a guide, but for better
5027 * performance, jump directly to RPe as our starting frequency.
5029 freq = max(dev_priv->rps.cur_freq,
5030 dev_priv->rps.efficient_freq);
5032 if (intel_set_rps(dev_priv,
5033 clamp(freq,
5034 dev_priv->rps.min_freq_softlimit,
5035 dev_priv->rps.max_freq_softlimit)))
5036 DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
5038 mutex_unlock(&dev_priv->rps.hw_lock);
5041 void gen6_rps_idle(struct drm_i915_private *dev_priv)
5043 /* Flush our bottom-half so that it does not race with us
5044 * setting the idle frequency and so that it is bounded by
5045 * our rpm wakeref. And then disable the interrupts to stop any
5046 * further RPS reclocking whilst we are asleep.
5048 gen6_disable_rps_interrupts(dev_priv);
5050 mutex_lock(&dev_priv->rps.hw_lock);
5051 if (dev_priv->rps.enabled) {
5052 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5053 vlv_set_rps_idle(dev_priv);
5054 else
5055 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5056 dev_priv->rps.last_adj = 0;
5057 I915_WRITE(GEN6_PMINTRMSK,
5058 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
5060 mutex_unlock(&dev_priv->rps.hw_lock);
5062 spin_lock(&dev_priv->rps.client_lock);
5063 while (!list_empty(&dev_priv->rps.clients))
5064 list_del_init(dev_priv->rps.clients.next);
5065 spin_unlock(&dev_priv->rps.client_lock);
5068 void gen6_rps_boost(struct drm_i915_private *dev_priv,
5069 struct intel_rps_client *rps,
5070 unsigned long submitted)
5072 /* This is intentionally racy! We peek at the state here, then
5073 * validate inside the RPS worker.
5075 if (!(dev_priv->gt.awake &&
5076 dev_priv->rps.enabled &&
5077 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
5078 return;
5080 /* Force a RPS boost (and don't count it against the client) if
5081 * the GPU is severely congested.
5083 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
5084 rps = NULL;
5086 spin_lock(&dev_priv->rps.client_lock);
5087 if (rps == NULL || list_empty(&rps->link)) {
5088 spin_lock_irq(&dev_priv->irq_lock);
5089 if (dev_priv->rps.interrupts_enabled) {
5090 dev_priv->rps.client_boost = true;
5091 schedule_work(&dev_priv->rps.work);
5093 spin_unlock_irq(&dev_priv->irq_lock);
5095 if (rps != NULL) {
5096 list_add(&rps->link, &dev_priv->rps.clients);
5097 rps->boosts++;
5098 } else
5099 dev_priv->rps.boosts++;
5101 spin_unlock(&dev_priv->rps.client_lock);
5104 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
5106 int err;
5108 lockdep_assert_held(&dev_priv->rps.hw_lock);
5109 GEM_BUG_ON(val > dev_priv->rps.max_freq);
5110 GEM_BUG_ON(val < dev_priv->rps.min_freq);
5112 if (!dev_priv->rps.enabled) {
5113 dev_priv->rps.cur_freq = val;
5114 return 0;
5117 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5118 err = valleyview_set_rps(dev_priv, val);
5119 else
5120 err = gen6_set_rps(dev_priv, val);
5122 return err;
5125 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
5127 I915_WRITE(GEN6_RC_CONTROL, 0);
5128 I915_WRITE(GEN9_PG_ENABLE, 0);
5131 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
5133 I915_WRITE(GEN6_RP_CONTROL, 0);
5136 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
5138 I915_WRITE(GEN6_RC_CONTROL, 0);
5139 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
5140 I915_WRITE(GEN6_RP_CONTROL, 0);
5143 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
5145 I915_WRITE(GEN6_RC_CONTROL, 0);
5148 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
5150 /* We're doing forcewake before disabling RC6,
5151 * this is what the BIOS expects when going into suspend */
5152 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5154 I915_WRITE(GEN6_RC_CONTROL, 0);
5156 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5159 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
5161 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5162 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
5163 mode = GEN6_RC_CTL_RC6_ENABLE;
5164 else
5165 mode = 0;
5167 if (HAS_RC6p(dev_priv))
5168 DRM_DEBUG_DRIVER("Enabling RC6 states: "
5169 "RC6 %s RC6p %s RC6pp %s\n",
5170 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
5171 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
5172 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
5174 else
5175 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
5176 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
5179 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
5181 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5182 bool enable_rc6 = true;
5183 unsigned long rc6_ctx_base;
5184 u32 rc_ctl;
5185 int rc_sw_target;
5187 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5188 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5189 RC_SW_TARGET_STATE_SHIFT;
5190 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5191 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5192 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5193 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5194 rc_sw_target);
5196 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
5197 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
5202 * The exact context size is not known for BXT, so assume a page size
5203 * for this check.
5205 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
5206 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5207 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5208 ggtt->stolen_reserved_size))) {
5209 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
5213 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5214 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5215 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5216 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
5217 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5221 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5222 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5223 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5224 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5228 if (!I915_READ(GEN6_GFXPAUSE)) {
5229 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5233 if (!I915_READ(GEN8_MISC_CTRL0)) {
5234 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
5241 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5243 /* No RC6 before Ironlake and code is gone for ilk. */
5244 if (INTEL_INFO(dev_priv)->gen < 6)
5245 return 0;
5247 if (!enable_rc6)
5248 return 0;
5250 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
5251 DRM_INFO("RC6 disabled by BIOS\n");
5252 return 0;
5255 /* Respect the kernel parameter if it is set */
5256 if (enable_rc6 >= 0) {
5259 if (HAS_RC6p(dev_priv))
5260 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5261 INTEL_RC6pp_ENABLE;
5262 else
5263 mask = INTEL_RC6_ENABLE;
5265 if ((enable_rc6 & mask) != enable_rc6)
5266 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5267 "(requested %d, valid %d)\n",
5268 enable_rc6 & mask, enable_rc6, mask);
5270 return enable_rc6 & mask;
5273 if (IS_IVYBRIDGE(dev_priv))
5274 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
5276 return INTEL_RC6_ENABLE;
5279 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
5281 /* All of these values are in units of 50MHz */
5283 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
5284 if (IS_GEN9_LP(dev_priv)) {
5285 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
5286 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5287 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5288 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
5290 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
5291 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
5292 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5293 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
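/*
 * Illustrative decode (hypothetical fuse value): a GEN6_RP_STATE_CAP of
 * 0x040b16 yields rp0 = 0x16 (22 * 50 MHz = 1100 MHz), rp1 = 0x0b
 * (550 MHz) and min_freq = 0x04 (200 MHz); note the reversed RP0/RPn
 * byte order in BXT_RP_STATE_CAP handled above.
 */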
5295 /* hw_max = RP0 until we check for overclocking */
5296 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
5298 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
5299 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5300 IS_GEN9_BC(dev_priv)) {
5301 u32 ddcc_status = 0;
5303 if (sandybridge_pcode_read(dev_priv,
5304 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5305 &ddcc_status) == 0)
5306 dev_priv->rps.efficient_freq =
5307 clamp_t(u8,
5308 ((ddcc_status >> 8) & 0xff),
5309 dev_priv->rps.min_freq,
5310 dev_priv->rps.max_freq);
5313 if (IS_GEN9_BC(dev_priv)) {
5314 /* Store the frequency values in 16.66 MHz units, which is
5315 * the natural hardware unit for SKL
5317 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5318 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5319 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5320 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5321 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
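/*
 * Example of the scaling above (GEN9_FREQ_SCALER == 3, illustrative
 * input): an RP0 value of 22 in 50 MHz units becomes 66 in 16.66 MHz
 * units, and intel_gpu_freq() converts either representation to MHz.
 */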
5325 static void reset_rps(struct drm_i915_private *dev_priv,
5326 int (*set)(struct drm_i915_private *, u8))
5328 u8 freq = dev_priv->rps.cur_freq;
5331 dev_priv->rps.power = -1;
5332 dev_priv->rps.cur_freq = -1;
5334 if (set(dev_priv, freq))
5335 DRM_ERROR("Failed to reset RPS to initial values\n");
5338 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5339 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5341 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5343 /* Program defaults and thresholds for RPS*/
5344 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5345 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5347 /* 1 second timeout*/
5348 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5349 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5351 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5353 /* Leaning on the below call to gen6_set_rps to program/setup the
5354 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5355 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5356 reset_rps(dev_priv, gen6_set_rps);
5358 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5361 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5363 struct intel_engine_cs *engine;
5364 enum intel_engine_id id;
5365 uint32_t rc6_mask = 0;
5367 /* 1a: Software RC state - RC0 */
5368 I915_WRITE(GEN6_RC_STATE, 0);
5370 /* 1b: Get forcewake during program sequence. Although the driver
5371 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5372 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5374 /* 2a: Disable RC states. */
5375 I915_WRITE(GEN6_RC_CONTROL, 0);
5377 /* 2b: Program RC6 thresholds.*/
5379 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5380 if (IS_SKYLAKE(dev_priv))
5381 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5383 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5384 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5385 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5386 for_each_engine(engine, dev_priv, id)
5387 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5389 if (HAS_GUC(dev_priv))
5390 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5392 I915_WRITE(GEN6_RC_SLEEP, 0);
5394 /* 2c: Program Coarse Power Gating Policies. */
5395 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5396 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5398 /* 3a: Enable RC6 */
5399 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5400 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5401 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5402 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5403 I915_WRITE(GEN6_RC_CONTROL,
5404 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
5407 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5408 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5410 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5411 I915_WRITE(GEN9_PG_ENABLE, 0);
5412 else
5413 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5414 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5416 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5419 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5421 struct intel_engine_cs *engine;
5422 enum intel_engine_id id;
5423 uint32_t rc6_mask = 0;
5425 /* 1a: Software RC state - RC0 */
5426 I915_WRITE(GEN6_RC_STATE, 0);
5428 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5429 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5430 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5432 /* 2a: Disable RC states. */
5433 I915_WRITE(GEN6_RC_CONTROL, 0);
5435 /* 2b: Program RC6 thresholds.*/
5436 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5437 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5438 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5439 for_each_engine(engine, dev_priv, id)
5440 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5441 I915_WRITE(GEN6_RC_SLEEP, 0);
5442 if (IS_BROADWELL(dev_priv))
5443 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5445 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5448 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5449 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5450 intel_print_rc6_info(dev_priv, rc6_mask);
5451 if (IS_BROADWELL(dev_priv))
5452 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5453 GEN7_RC_CTL_TO_MODE |
5454 rc6_mask);
5455 else
5456 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5457 GEN6_RC_CTL_EI_MODE(1) |
5458 rc6_mask);
5460 /* 4 Program defaults and thresholds for RPS*/
5461 I915_WRITE(GEN6_RPNSWREQ,
5462 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5463 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5464 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5465 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5466 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5468 /* Docs recommend 900MHz, and 300 MHz respectively */
5469 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5470 dev_priv->rps.max_freq_softlimit << 24 |
5471 dev_priv->rps.min_freq_softlimit << 16);
5473 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5474 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5475 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5476 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5478 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5481 I915_WRITE(GEN6_RP_CONTROL,
5482 GEN6_RP_MEDIA_TURBO |
5483 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5484 GEN6_RP_MEDIA_IS_GFX |
5485 GEN6_RP_ENABLE |
5486 GEN6_RP_UP_BUSY_AVG |
5487 GEN6_RP_DOWN_IDLE_AVG);
5489 /* 6: Ring frequency + overclocking (our driver does this later) */
5491 reset_rps(dev_priv, gen6_set_rps);
5493 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5496 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5498 struct intel_engine_cs *engine;
5499 enum intel_engine_id id;
5500 u32 rc6vids, rc6_mask = 0;
5501 u32 gtfifodbg;
5502 int rc6_mode;
5503 int ret;
5505 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5507 /* Here begins a magic sequence of register writes to enable
5508 * auto-downclocking.
5510 * Perhaps there might be some value in exposing these to
5511 * userspace...
5513 I915_WRITE(GEN6_RC_STATE, 0);
5515 /* Clear the DBG now so we don't confuse earlier errors */
5516 gtfifodbg = I915_READ(GTFIFODBG);
5518 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5519 I915_WRITE(GTFIFODBG, gtfifodbg);
5522 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5524 /* disable the counters and set deterministic thresholds */
5525 I915_WRITE(GEN6_RC_CONTROL, 0);
5527 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5528 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5529 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5530 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5531 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5533 for_each_engine(engine, dev_priv, id)
5534 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5536 I915_WRITE(GEN6_RC_SLEEP, 0);
5537 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5538 if (IS_IVYBRIDGE(dev_priv))
5539 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5541 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5542 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5543 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5545 /* Check if we are enabling RC6 */
5546 rc6_mode = intel_enable_rc6();
5547 if (rc6_mode & INTEL_RC6_ENABLE)
5548 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5550 /* We don't use those on Haswell */
5551 if (!IS_HASWELL(dev_priv)) {
5552 if (rc6_mode & INTEL_RC6p_ENABLE)
5553 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5555 if (rc6_mode & INTEL_RC6pp_ENABLE)
5556 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5559 intel_print_rc6_info(dev_priv, rc6_mask);
5561 I915_WRITE(GEN6_RC_CONTROL,
5562 rc6_mask |
5563 GEN6_RC_CTL_EI_MODE(1) |
5564 GEN6_RC_CTL_HW_ENABLE);
5566 /* Power down if completely idle for over 50ms */
5567 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5568 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5570 reset_rps(dev_priv, gen6_set_rps);
5572 rc6vids = 0;
5573 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5574 if (IS_GEN6(dev_priv) && ret) {
5575 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5576 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5577 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5578 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5579 rc6vids &= 0xffff00;
5580 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5581 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5583 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5586 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5589 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5591 const int min_freq = 15;
5592 unsigned int gpu_freq;
5593 unsigned int max_ia_freq, min_ring_freq;
5594 unsigned int max_gpu_freq, min_gpu_freq;
5595 int scaling_factor = 180;
5596 struct cpufreq_policy *policy;
5598 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5600 policy = cpufreq_cpu_get(0);
5601 if (policy) {
5602 max_ia_freq = policy->cpuinfo.max_freq;
5603 cpufreq_cpu_put(policy);
5604 } else {
5606 * Default to measured freq if none found, PCU will ensure we
5607 * don't get stuck in a loop
5609 max_ia_freq = tsc_khz;
5612 /* Convert from kHz to MHz */
5613 max_ia_freq /= 1000;
5615 min_ring_freq = I915_READ(DCLK) & 0xf;
5616 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5617 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
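/*
 * Worked example (illustrative DCLK field value): a raw value of 6
 * means 6 * 266.6 MHz = 1600 MHz DDR, and mult_frac(6, 8, 3) = 16
 * expresses the same speed in the 100 MHz units used for ring_freq
 * below.
 */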
5619 if (IS_GEN9_BC(dev_priv)) {
5620 /* Convert GT frequency to 50 HZ units */
5621 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5622 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5624 min_gpu_freq = dev_priv->rps.min_freq;
5625 max_gpu_freq = dev_priv->rps.max_freq;
5629 * For each potential GPU frequency, load a ring frequency we'd like
5630 * to use for memory access. We do this by specifying the IA frequency
5631 * the PCU should use as a reference to determine the ring frequency.
5633 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5634 int diff = max_gpu_freq - gpu_freq;
5635 unsigned int ia_freq = 0, ring_freq = 0;
5637 if (IS_GEN9_BC(dev_priv)) {
5639 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5640 * No floor required for ring frequency on SKL.
5642 ring_freq = gpu_freq;
5643 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5644 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5645 ring_freq = max(min_ring_freq, gpu_freq);
5646 } else if (IS_HASWELL(dev_priv)) {
5647 ring_freq = mult_frac(gpu_freq, 5, 4);
5648 ring_freq = max(min_ring_freq, ring_freq);
5649 /* leave ia_freq as the default, chosen by cpufreq */
5651 /* On older processors, there is no separate ring
5652 * clock domain, so in order to boost the bandwidth
5653 * of the ring, we need to upclock the CPU (ia_freq).
5655 * For GPU frequencies less than 750MHz,
5656 * just use the lowest ring freq.
5658 if (gpu_freq < min_freq)
5659 ia_freq = 800;
5660 else
5661 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5662 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5665 sandybridge_pcode_write(dev_priv,
5666 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5667 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5668 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5669 gpu_freq);
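/*
 * Worked example of one table entry (hypothetical HSW numbers): with
 * max_ia_freq = 3400 MHz, gpu_freq = 20 (20 * 50 = 1000 MHz) gives
 * ring_freq = mult_frac(20, 5, 4) = 25 (2500 MHz), and ia_freq drops
 * by scaling_factor / 2 = 90 MHz for each 50 MHz step below the GPU
 * max.
 */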
5673 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5677 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5679 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
5680 case 8:
5681 /* (2 * 4) config */
5682 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5683 break;
5684 case 12:
5685 /* (2 * 6) config */
5686 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5687 break;
5688 case 16:
5689 /* (2 * 8) config */
5690 default:
5691 /* Setting (2 * 8) Min RP0 for any other combination */
5692 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5693 break;
5694 }
5696 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5698 return rp0;
5701 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5705 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5706 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5708 return rpe;
5711 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5715 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5716 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5718 return rp1;
5721 static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
5725 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
5726 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
5727 FB_GFX_FREQ_FUSE_MASK);
5729 return rpn;
5732 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5736 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5738 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5740 return rp1;
5743 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5747 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5749 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5751 rp0 = min_t(u32, rp0, 0xea);
5753 return rp0;
5756 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5760 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5761 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5762 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5763 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5765 return rpe;
5768 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5772 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5774 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5775 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5776 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5777 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
5778 * to make sure it matches what Punit accepts.
5780 return max_t(u32, val, 0xc0);
5783 /* Check that the pctx buffer wasn't moved under us. */
5784 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5786 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5788 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5789 dev_priv->vlv_pctx->stolen->start);
5793 /* Check that the pcbr address is not empty. */
5794 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5796 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5798 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5801 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5803 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5804 unsigned long pctx_paddr, paddr;
5805 u32 pcbr;
5806 int pctx_size = 32*1024;
5808 pcbr = I915_READ(VLV_PCBR);
5809 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5810 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5811 paddr = (dev_priv->mm.stolen_base +
5812 (ggtt->stolen_size - pctx_size));
5814 pctx_paddr = (paddr & (~4095));
5815 I915_WRITE(VLV_PCBR, pctx_paddr);
5818 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5821 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5823 struct drm_i915_gem_object *pctx;
5824 unsigned long pctx_paddr;
5825 u32 pcbr;
5826 int pctx_size = 24*1024;
5828 pcbr = I915_READ(VLV_PCBR);
5829 if (pcbr) {
5830 /* BIOS set it up already, grab the pre-alloc'd space */
5831 int pcbr_offset;
5833 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5834 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
5835 pcbr_offset,
5836 I915_GTT_OFFSET_NONE,
5837 pctx_size);
5838 goto out;
5841 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5844 * From the Gunit register HAS:
5845 * The Gfx driver is expected to program this register and ensure
5846 * proper allocation within Gfx stolen memory. For example, this
5847 * register should be programmed such that the PCBR range does not
5848 * overlap with other ranges, such as the frame buffer, protected
5849 * memory, or any other relevant ranges.
5851 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
5853 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5857 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5858 I915_WRITE(VLV_PCBR, pctx_paddr);
5861 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5862 dev_priv->vlv_pctx = pctx;
5865 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5867 if (WARN_ON(!dev_priv->vlv_pctx))
5868 return;
5870 i915_gem_object_put(dev_priv->vlv_pctx);
5871 dev_priv->vlv_pctx = NULL;
5874 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5876 dev_priv->rps.gpll_ref_freq =
5877 vlv_get_cck_clock(dev_priv, "GPLL ref",
5878 CCK_GPLL_CLOCK_CONTROL,
5879 dev_priv->czclk_freq);
5881 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5882 dev_priv->rps.gpll_ref_freq);
5885 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5889 valleyview_setup_pctx(dev_priv);
5891 vlv_init_gpll_ref_freq(dev_priv);
5893 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5894 switch ((val >> 6) & 3) {
5895 case 0:
5896 case 1:
5897 dev_priv->mem_freq = 800;
5898 break;
5899 case 2:
5900 dev_priv->mem_freq = 1066;
5901 break;
5902 case 3:
5903 dev_priv->mem_freq = 1333;
5904 break;
5905 }
5906 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5908 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5909 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5910 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5911 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5912 dev_priv->rps.max_freq);
5914 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5915 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5916 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5917 dev_priv->rps.efficient_freq);
5919 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5920 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5921 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5922 dev_priv->rps.rp1_freq);
5924 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5925 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5926 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5927 dev_priv->rps.min_freq);
5930 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5934 cherryview_setup_pctx(dev_priv);
5936 vlv_init_gpll_ref_freq(dev_priv);
5938 mutex_lock(&dev_priv->sb_lock);
5939 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5940 mutex_unlock(&dev_priv->sb_lock);
5942 switch ((val >> 2) & 0x7) {
5943 case 3:
5944 dev_priv->mem_freq = 2000;
5945 break;
5946 default:
5947 dev_priv->mem_freq = 1600;
5948 break;
5949 }
5950 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5952 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5953 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5954 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5955 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5956 dev_priv->rps.max_freq);
5958 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5959 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5960 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5961 dev_priv->rps.efficient_freq);
5963 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5964 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5965 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5966 dev_priv->rps.rp1_freq);
5968 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
5969 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5970 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5971 dev_priv->rps.min_freq);
5973 WARN_ONCE((dev_priv->rps.max_freq |
5974 dev_priv->rps.efficient_freq |
5975 dev_priv->rps.rp1_freq |
5976 dev_priv->rps.min_freq) & 1,
5977 "Odd GPU freq values\n");
5980 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5982 valleyview_cleanup_pctx(dev_priv);
5985 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5987 struct intel_engine_cs *engine;
5988 enum intel_engine_id id;
5989 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5991 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5993 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
5994 GT_FIFO_FREE_ENTRIES_CHV);
5996 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5998 I915_WRITE(GTFIFODBG, gtfifodbg);
6001 cherryview_check_pctx(dev_priv);
6003 /* 1a & 1b: Get forcewake during program sequence. Although the driver
6004 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
6005 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6007 /* Disable RC states. */
6008 I915_WRITE(GEN6_RC_CONTROL, 0);
6010 /* 2a: Program RC6 thresholds.*/
6011 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
6012 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
6013 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
6015 for_each_engine(engine, dev_priv, id)
6016 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6017 I915_WRITE(GEN6_RC_SLEEP, 0);
6019 /* TO threshold set to 500 us (0x186 * 1.28 us) */
6020 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
6022 /* allows RC6 residency counter to work */
6023 I915_WRITE(VLV_COUNTER_CONTROL,
6024 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
6025 VLV_MEDIA_RC6_COUNT_EN |
6026 VLV_RENDER_RC6_COUNT_EN));
6028 /* For now we assume BIOS is allocating and populating the PCBR */
6029 pcbr = I915_READ(VLV_PCBR);
6032 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
6033 (pcbr >> VLV_PCBR_ADDR_SHIFT))
6034 rc6_mode = GEN7_RC_CTL_TO_MODE;
6036 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6038 /* 4 Program defaults and thresholds for RPS*/
6039 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6040 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6041 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6042 I915_WRITE(GEN6_RP_UP_EI, 66000);
6043 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6045 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6048 I915_WRITE(GEN6_RP_CONTROL,
6049 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6050 GEN6_RP_MEDIA_IS_GFX |
6051 GEN6_RP_ENABLE |
6052 GEN6_RP_UP_BUSY_AVG |
6053 GEN6_RP_DOWN_IDLE_AVG);
6055 /* Setting Fixed Bias */
6056 val = VLV_OVERRIDE_EN |
6057 VLV_SOC_TDP_EN |
6058 CHV_BIAS_CPU_50_SOC_50;
6059 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6061 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6063 /* RPS code assumes GPLL is used */
6064 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6066 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6067 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6069 reset_rps(dev_priv, valleyview_set_rps);
6071 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6074 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
6076 struct intel_engine_cs *engine;
6077 enum intel_engine_id id;
6078 u32 gtfifodbg, val, rc6_mode = 0;
6080 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6082 valleyview_check_pctx(dev_priv);
6084 gtfifodbg = I915_READ(GTFIFODBG);
6086 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6088 I915_WRITE(GTFIFODBG, gtfifodbg);
6091 /* If VLV, Forcewake all wells, else re-direct to regular path */
6092 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6094 /* Disable RC states. */
6095 I915_WRITE(GEN6_RC_CONTROL, 0);
6097 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6098 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6099 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6100 I915_WRITE(GEN6_RP_UP_EI, 66000);
6101 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6103 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6105 I915_WRITE(GEN6_RP_CONTROL,
6106 GEN6_RP_MEDIA_TURBO |
6107 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6108 GEN6_RP_MEDIA_IS_GFX |
6109 GEN6_RP_ENABLE |
6110 GEN6_RP_UP_BUSY_AVG |
6111 GEN6_RP_DOWN_IDLE_CONT);
6113 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6114 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6115 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6117 for_each_engine(engine, dev_priv, id)
6118 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6120 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
6122 /* allows RC6 residency counter to work */
6123 I915_WRITE(VLV_COUNTER_CONTROL,
6124 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
6125 VLV_RENDER_RC0_COUNT_EN |
6126 VLV_MEDIA_RC6_COUNT_EN |
6127 VLV_RENDER_RC6_COUNT_EN));
6129 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6130 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
6132 intel_print_rc6_info(dev_priv, rc6_mode);
6134 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6136 /* Setting Fixed Bias */
6137 val = VLV_OVERRIDE_EN |
6138 VLV_SOC_TDP_EN |
6139 VLV_BIAS_CPU_125_SOC_875;
6140 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6142 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6144 /* RPS code assumes GPLL is used */
6145 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6147 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6148 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6150 reset_rps(dev_priv, valleyview_set_rps);
6152 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6155 static unsigned long intel_pxfreq(u32 vidfreq)
6156 {
6157 unsigned long freq;
6158 int div = (vidfreq & 0x3f0000) >> 16;
6159 int post = (vidfreq & 0x3000) >> 12;
6160 int pre = (vidfreq & 0x7);
6162 if (!pre)
6163 return 0;
6165 freq = ((div * 133333) / ((1<<post) * pre));
6167 return freq;
6168 }
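/*
 * Worked example for intel_pxfreq() (illustrative field values, not taken
 * from any particular part): a vidfreq encoding with div = 18, post = 1,
 * pre = 1 gives freq = (18 * 133333) / ((1 << 1) * 1) = 1199997, i.e.
 * roughly 1.2 GHz with the 133.333 MHz reference expressed in kHz.
 */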
6170 static const struct cparams {
6171 u16 i;
6172 u16 t;
6173 u16 m;
6174 u16 c;
6175 } cparams[] = {
6176 { 1, 1333, 301, 28664 },
6177 { 1, 1066, 294, 24460 },
6178 { 1, 800, 294, 25192 },
6179 { 0, 1333, 276, 27605 },
6180 { 0, 1066, 276, 27605 },
6181 { 0, 800, 231, 23784 },
6182 };
6184 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
6186 u64 total_count, diff, ret;
6187 u32 count1, count2, count3, m = 0, c = 0;
6188 unsigned long now = jiffies_to_msecs(jiffies), diff1;
6189 int i;
6191 lockdep_assert_held(&mchdev_lock);
6193 diff1 = now - dev_priv->ips.last_time1;
6195 /* Prevent division-by-zero if we are asking too fast.
6196 * Also, we don't get interesting results if we are polling
6197 * faster than once in 10ms, so just return the saved value
6198 * in such cases.
6199 */
6200 if (diff1 <= 10)
6201 return dev_priv->ips.chipset_power;
6203 count1 = I915_READ(DMIEC);
6204 count2 = I915_READ(DDREC);
6205 count3 = I915_READ(CSIEC);
6207 total_count = count1 + count2 + count3;
6209 /* FIXME: handle per-counter overflow */
6210 if (total_count < dev_priv->ips.last_count1) {
6211 diff = ~0UL - dev_priv->ips.last_count1;
6212 diff += total_count;
6213 } else {
6214 diff = total_count - dev_priv->ips.last_count1;
6215 }
6217 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
6218 if (cparams[i].i == dev_priv->ips.c_m &&
6219 cparams[i].t == dev_priv->ips.r_t) {
6220 m = cparams[i].m;
6221 c = cparams[i].c;
6222 break;
6223 }
6224 }
6226 diff = div_u64(diff, diff1);
6227 ret = ((m * diff) + c);
6228 ret = div_u64(ret, 10);
6230 dev_priv->ips.last_count1 = total_count;
6231 dev_priv->ips.last_time1 = now;
6233 dev_priv->ips.chipset_power = ret;
6235 return ret;
6236 }
6238 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6239 {
6240 unsigned long val;
6242 if (INTEL_INFO(dev_priv)->gen != 5)
6243 return 0;
6245 spin_lock_irq(&mchdev_lock);
6247 val = __i915_chipset_val(dev_priv);
6249 spin_unlock_irq(&mchdev_lock);
6251 return val;
6252 }
6254 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6256 unsigned long m, x, b;
6257 u32 tsfs;
6259 tsfs = I915_READ(TSFS);
6261 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6262 x = I915_READ8(TR1);
6264 b = tsfs & TSFS_INTR_MASK;
6266 return ((m * x) / 127) - b;
6269 static int _pxvid_to_vd(u8 pxvid)
6270 {
6271 if (pxvid == 0)
6272 return 0;
6274 if (pxvid >= 8 && pxvid < 31)
6275 pxvid = 31;
6277 return (pxvid + 2) * 125;
6278 }
6280 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6282 const int vd = _pxvid_to_vd(pxvid);
6283 const int vm = vd - 1125;
6285 if (INTEL_INFO(dev_priv)->is_mobile)
6286 return vm > 0 ? vm : 0;
6288 return vd;
6289 }
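/*
 * Illustrative arithmetic for the PXVID decode above (made-up sample
 * value): pxvid 0x11 (17) lies in [8, 31) and is clamped to 31 by
 * _pxvid_to_vd(), giving vd = (31 + 2) * 125 = 4125 and
 * vm = 4125 - 1125 = 3000; a mobile part reports 3000, a desktop part
 * reports the unadjusted 4125.
 */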
6291 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6293 u64 now, diff, diffms;
6294 u32 count;
6296 lockdep_assert_held(&mchdev_lock);
6298 now = ktime_get_raw_ns();
6299 diffms = now - dev_priv->ips.last_time2;
6300 do_div(diffms, NSEC_PER_MSEC);
6302 /* Don't divide by 0 */
6303 if (!diffms)
6304 return;
6306 count = I915_READ(GFXEC);
6308 if (count < dev_priv->ips.last_count2) {
6309 diff = ~0UL - dev_priv->ips.last_count2;
6310 diff += count;
6311 } else {
6312 diff = count - dev_priv->ips.last_count2;
6313 }
6315 dev_priv->ips.last_count2 = count;
6316 dev_priv->ips.last_time2 = now;
6318 /* More magic constants... */
6319 diff = diff * 1181;
6320 diff = div_u64(diff, diffms * 10);
6321 dev_priv->ips.gfx_power = diff;
6324 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6326 if (INTEL_INFO(dev_priv)->gen != 5)
6327 return;
6329 spin_lock_irq(&mchdev_lock);
6331 __i915_update_gfx_val(dev_priv);
6333 spin_unlock_irq(&mchdev_lock);
6336 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
6338 unsigned long t, corr, state1, corr2, state2;
6339 u32 pxvid, ext_v;
6341 lockdep_assert_held(&mchdev_lock);
6343 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6344 pxvid = (pxvid >> 24) & 0x7f;
6345 ext_v = pvid_to_extvid(dev_priv, pxvid);
6347 state1 = ext_v;
6349 t = i915_mch_val(dev_priv);
6351 /* Revel in the empirically derived constants */
6353 /* Correction factor in 1/100000 units */
6354 if (t > 80)
6355 corr = ((t * 2349) + 135940);
6356 else if (t >= 50)
6357 corr = ((t * 964) + 29317);
6358 else /* < 50 */
6359 corr = ((t * 301) + 1004);
6361 corr = corr * ((150142 * state1) / 10000 - 78642);
6362 corr /= 100000;
6363 corr2 = (corr * dev_priv->ips.corr);
6365 state2 = (corr2 * state1) / 10000;
6366 state2 /= 100; /* convert to mW */
6368 __i915_update_gfx_val(dev_priv);
6370 return dev_priv->ips.gfx_power + state2;
6373 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
6374 {
6375 unsigned long val;
6377 if (INTEL_INFO(dev_priv)->gen != 5)
6378 return 0;
6380 spin_lock_irq(&mchdev_lock);
6382 val = __i915_gfx_val(dev_priv);
6384 spin_unlock_irq(&mchdev_lock);
6386 return val;
6387 }
6389 /**
6390 * i915_read_mch_val - return value for IPS use
6391 *
6392 * Calculate and return a value for the IPS driver to use when deciding whether
6393 * we have thermal and power headroom to increase CPU or GPU power budget.
6394 */
6395 unsigned long i915_read_mch_val(void)
6397 struct drm_i915_private *dev_priv;
6398 unsigned long chipset_val, graphics_val, ret = 0;
6400 spin_lock_irq(&mchdev_lock);
6401 if (!i915_mch_dev)
6402 goto out_unlock;
6403 dev_priv = i915_mch_dev;
6405 chipset_val = __i915_chipset_val(dev_priv);
6406 graphics_val = __i915_gfx_val(dev_priv);
6408 ret = chipset_val + graphics_val;
6410 out_unlock:
6411 spin_unlock_irq(&mchdev_lock);
6413 return ret;
6414 }
6415 EXPORT_SYMBOL_GPL(i915_read_mch_val);
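/*
 * Sketch of the consumer side (illustrative only; the real intel_ips
 * policy loop is more involved): the platform driver polls this estimate
 * and nudges the GPU limit with the paired hooks exported below.
 *
 *	unsigned long draw = i915_read_mch_val();
 *
 *	if (draw > power_budget)
 *		i915_gpu_lower();
 *	else if (i915_gpu_busy())
 *		i915_gpu_raise();
 */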
6417 /**
6418 * i915_gpu_raise - raise GPU frequency limit
6419 *
6420 * Raise the limit; IPS indicates we have thermal headroom.
6421 */
6422 bool i915_gpu_raise(void)
6424 struct drm_i915_private *dev_priv;
6425 bool ret = true;
6427 spin_lock_irq(&mchdev_lock);
6428 if (!i915_mch_dev) {
6429 ret = false;
6430 goto out_unlock;
6431 }
6432 dev_priv = i915_mch_dev;
6434 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
6435 dev_priv->ips.max_delay--;
6437 out_unlock:
6438 spin_unlock_irq(&mchdev_lock);
6440 return ret;
6441 }
6442 EXPORT_SYMBOL_GPL(i915_gpu_raise);
6444 /**
6445 * i915_gpu_lower - lower GPU frequency limit
6446 *
6447 * IPS indicates we're close to a thermal limit, so throttle back the GPU
6448 * frequency maximum.
6449 */
6450 bool i915_gpu_lower(void)
6452 struct drm_i915_private *dev_priv;
6453 bool ret = true;
6455 spin_lock_irq(&mchdev_lock);
6456 if (!i915_mch_dev) {
6457 ret = false;
6458 goto out_unlock;
6459 }
6460 dev_priv = i915_mch_dev;
6462 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6463 dev_priv->ips.max_delay++;
6465 out_unlock:
6466 spin_unlock_irq(&mchdev_lock);
6468 return ret;
6469 }
6470 EXPORT_SYMBOL_GPL(i915_gpu_lower);
6472 /**
6473 * i915_gpu_busy - indicate GPU busyness to IPS
6474 *
6475 * Tell the IPS driver whether or not the GPU is busy.
6476 */
6477 bool i915_gpu_busy(void)
6478 {
6479 bool ret = false;
6481 spin_lock_irq(&mchdev_lock);
6482 if (i915_mch_dev)
6483 ret = i915_mch_dev->gt.awake;
6484 spin_unlock_irq(&mchdev_lock);
6486 return ret;
6487 }
6488 EXPORT_SYMBOL_GPL(i915_gpu_busy);
6490 /**
6491 * i915_gpu_turbo_disable - disable graphics turbo
6492 *
6493 * Disable graphics turbo by resetting the max frequency and setting the
6494 * current frequency to the default.
6495 */
6496 bool i915_gpu_turbo_disable(void)
6498 struct drm_i915_private *dev_priv;
6499 bool ret = true;
6501 spin_lock_irq(&mchdev_lock);
6502 if (!i915_mch_dev) {
6503 ret = false;
6504 goto out_unlock;
6505 }
6506 dev_priv = i915_mch_dev;
6508 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6510 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6511 ret = false;
6513 out_unlock:
6514 spin_unlock_irq(&mchdev_lock);
6516 return ret;
6517 }
6518 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6520 /**
6521 * Tells the intel_ips driver that the i915 driver is now loaded, if
6522 * IPS got loaded first.
6523 *
6524 * This awkward dance is so that neither module has to depend on the
6525 * other in order for IPS to do the appropriate communication of
6526 * GPU turbo limits to i915.
6527 */
6528 static void
6529 ips_ping_for_i915_load(void)
6530 {
6531 void (*link)(void);
6533 link = symbol_get(ips_link_to_i915_driver);
6534 if (link) {
6535 link();
6536 symbol_put(ips_link_to_i915_driver);
6537 }
6538 }
6540 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6542 /* We only register the i915 ips part with intel-ips once everything is
6543 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6544 spin_lock_irq(&mchdev_lock);
6545 i915_mch_dev = dev_priv;
6546 spin_unlock_irq(&mchdev_lock);
6548 ips_ping_for_i915_load();
6551 void intel_gpu_ips_teardown(void)
6553 spin_lock_irq(&mchdev_lock);
6554 i915_mch_dev = NULL;
6555 spin_unlock_irq(&mchdev_lock);
6558 static void intel_init_emon(struct drm_i915_private *dev_priv)
6559 {
6560 u32 lcfuse;
6561 u8 pxw[16];
6562 int i;
6564 /* Disable to program */
6565 I915_WRITE(ECR, 0);
6566 POSTING_READ(ECR);
6568 /* Program energy weights for various events */
6569 I915_WRITE(SDEW, 0x15040d00);
6570 I915_WRITE(CSIEW0, 0x007f0000);
6571 I915_WRITE(CSIEW1, 0x1e220004);
6572 I915_WRITE(CSIEW2, 0x04000004);
6574 for (i = 0; i < 5; i++)
6575 I915_WRITE(PEW(i), 0);
6576 for (i = 0; i < 3; i++)
6577 I915_WRITE(DEW(i), 0);
6579 /* Program P-state weights to account for frequency power adjustment */
6580 for (i = 0; i < 16; i++) {
6581 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6582 unsigned long freq = intel_pxfreq(pxvidfreq);
6583 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6584 PXVFREQ_PX_SHIFT;
6585 unsigned long val;
6587 val = vid * vid;
6588 val *= (freq / 1000);
6589 val *= 255;
6590 val /= (127*127*900);
6591 if (val > 0xff)
6592 DRM_ERROR("bad pxval: %ld\n", val);
6593 pxw[i] = val;
6594 }
6595 /* Render standby states get 0 weight */
6596 pxw[14] = 0;
6597 pxw[15] = 0;
6599 for (i = 0; i < 4; i++) {
6600 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6601 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6602 I915_WRITE(PXW(i), val);
6605 /* Adjust magic regs to magic values (more experimental results) */
6606 I915_WRITE(OGW0, 0);
6607 I915_WRITE(OGW1, 0);
6608 I915_WRITE(EG0, 0x00007f00);
6609 I915_WRITE(EG1, 0x0000000e);
6610 I915_WRITE(EG2, 0x000e0000);
6611 I915_WRITE(EG3, 0x68000300);
6612 I915_WRITE(EG4, 0x42000000);
6613 I915_WRITE(EG5, 0x00140031);
6614 I915_WRITE(EG6, 0);
6615 I915_WRITE(EG7, 0);
6617 for (i = 0; i < 8; i++)
6618 I915_WRITE(PXWL(i), 0);
6620 /* Enable PMON + select events */
6621 I915_WRITE(ECR, 0x80000019);
6623 lcfuse = I915_READ(LCFUSE02);
6625 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6628 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6630 /*
6631 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6632 * requirement.
6633 */
6634 if (!i915.enable_rc6) {
6635 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6636 intel_runtime_pm_get(dev_priv);
6637 }
6639 mutex_lock(&dev_priv->drm.struct_mutex);
6640 mutex_lock(&dev_priv->rps.hw_lock);
6642 /* Initialize RPS limits (for userspace) */
6643 if (IS_CHERRYVIEW(dev_priv))
6644 cherryview_init_gt_powersave(dev_priv);
6645 else if (IS_VALLEYVIEW(dev_priv))
6646 valleyview_init_gt_powersave(dev_priv);
6647 else if (INTEL_GEN(dev_priv) >= 6)
6648 gen6_init_rps_frequencies(dev_priv);
6650 /* Derive initial user preferences/limits from the hardware limits */
6651 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
6652 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
6654 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
6655 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
6657 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6658 dev_priv->rps.min_freq_softlimit =
6659 max_t(int,
6660 dev_priv->rps.efficient_freq,
6661 intel_freq_opcode(dev_priv, 450));
6663 /* After setting max-softlimit, find the overclock max freq */
6664 if (IS_GEN6(dev_priv) ||
6665 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
6666 u32 params = 0;
6668 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
6669 if (params & BIT(31)) { /* OC supported */
6670 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
6671 (dev_priv->rps.max_freq & 0xff) * 50,
6672 (params & 0xff) * 50);
6673 dev_priv->rps.max_freq = params & 0xff;
6674 }
6675 }
6677 /* Finally allow us to boost to max by default */
6678 dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
6680 mutex_unlock(&dev_priv->rps.hw_lock);
6681 mutex_unlock(&dev_priv->drm.struct_mutex);
6683 intel_autoenable_gt_powersave(dev_priv);
6686 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6688 if (IS_VALLEYVIEW(dev_priv))
6689 valleyview_cleanup_gt_powersave(dev_priv);
6691 if (!i915.enable_rc6)
6692 intel_runtime_pm_put(dev_priv);
6695 /**
6696 * intel_suspend_gt_powersave - suspend PM work and helper threads
6697 * @dev_priv: i915 device
6698 *
6699 * We don't want to disable RC6 or other features here, we just want
6700 * to make sure any work we've queued has finished and won't bother
6701 * us while we're suspended.
6702 */
6703 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6705 if (INTEL_GEN(dev_priv) < 6)
6706 return;
6708 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
6709 intel_runtime_pm_put(dev_priv);
6711 /* gen6_rps_idle() will be called later to disable interrupts */
6714 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
6716 dev_priv->rps.enabled = true; /* force disabling */
6717 intel_disable_gt_powersave(dev_priv);
6718 if (INTEL_GEN(dev_priv) >= 6)
6719 gen6_reset_rps_interrupts(dev_priv);
6720 }
6722 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6724 if (!READ_ONCE(dev_priv->rps.enabled))
6725 return;
6727 mutex_lock(&dev_priv->rps.hw_lock);
6729 if (INTEL_GEN(dev_priv) >= 9) {
6730 gen9_disable_rc6(dev_priv);
6731 gen9_disable_rps(dev_priv);
6732 } else if (IS_CHERRYVIEW(dev_priv)) {
6733 cherryview_disable_rps(dev_priv);
6734 } else if (IS_VALLEYVIEW(dev_priv)) {
6735 valleyview_disable_rps(dev_priv);
6736 } else if (INTEL_GEN(dev_priv) >= 6) {
6737 gen6_disable_rps(dev_priv);
6738 } else if (IS_IRONLAKE_M(dev_priv)) {
6739 ironlake_disable_drps(dev_priv);
6740 }
6742 dev_priv->rps.enabled = false;
6743 mutex_unlock(&dev_priv->rps.hw_lock);
6746 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6748 /* We shouldn't be disabling as we submit, so this should be less
6749 * racy than it appears!
6750 */
6751 if (READ_ONCE(dev_priv->rps.enabled))
6752 return;
6754 /* Powersaving is controlled by the host when inside a VM */
6755 if (intel_vgpu_active(dev_priv))
6756 return;
6758 mutex_lock(&dev_priv->rps.hw_lock);
6760 if (IS_CHERRYVIEW(dev_priv)) {
6761 cherryview_enable_rps(dev_priv);
6762 } else if (IS_VALLEYVIEW(dev_priv)) {
6763 valleyview_enable_rps(dev_priv);
6764 } else if (INTEL_GEN(dev_priv) >= 9) {
6765 gen9_enable_rc6(dev_priv);
6766 gen9_enable_rps(dev_priv);
6767 if (IS_GEN9_BC(dev_priv))
6768 gen6_update_ring_freq(dev_priv);
6769 } else if (IS_BROADWELL(dev_priv)) {
6770 gen8_enable_rps(dev_priv);
6771 gen6_update_ring_freq(dev_priv);
6772 } else if (INTEL_GEN(dev_priv) >= 6) {
6773 gen6_enable_rps(dev_priv);
6774 gen6_update_ring_freq(dev_priv);
6775 } else if (IS_IRONLAKE_M(dev_priv)) {
6776 ironlake_enable_drps(dev_priv);
6777 intel_init_emon(dev_priv);
6778 }
6780 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6781 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6783 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6784 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6786 dev_priv->rps.enabled = true;
6787 mutex_unlock(&dev_priv->rps.hw_lock);
6790 static void __intel_autoenable_gt_powersave(struct work_struct *work)
6792 struct drm_i915_private *dev_priv =
6793 container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
6794 struct intel_engine_cs *rcs;
6795 struct drm_i915_gem_request *req;
6797 if (READ_ONCE(dev_priv->rps.enabled))
6798 goto out;
6800 rcs = dev_priv->engine[RCS];
6801 if (rcs->last_retired_context)
6802 goto out;
6804 if (!rcs->init_context)
6805 goto out;
6807 mutex_lock(&dev_priv->drm.struct_mutex);
6809 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
6810 if (IS_ERR(req))
6811 goto unlock;
6813 if (!i915.enable_execlists && i915_switch_context(req) == 0)
6814 rcs->init_context(req);
6816 /* Mark the device busy, calling intel_enable_gt_powersave() */
6817 i915_add_request_no_flush(req);
6819 unlock:
6820 mutex_unlock(&dev_priv->drm.struct_mutex);
6821 out:
6822 intel_runtime_pm_put(dev_priv);
6823 }
6825 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
6827 if (READ_ONCE(dev_priv->rps.enabled))
6828 return;
6830 if (IS_IRONLAKE_M(dev_priv)) {
6831 ironlake_enable_drps(dev_priv);
6832 intel_init_emon(dev_priv);
6833 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6835 * PCU communication is slow and this doesn't need to be
6836 * done at any specific time, so do this out of our fast path
6837 * to make resume and init faster.
6839 * We depend on the HW RC6 power context save/restore
6840 * mechanism when entering D3 through runtime PM suspend. So
6841 * disable RPM until RPS/RC6 is properly setup. We can only
6842 * get here via the driver load/system resume/runtime resume
6843 * paths, so the _noresume version is enough (and in case of
6844 * runtime resume it's necessary).
6846 if (queue_delayed_work(dev_priv->wq,
6847 &dev_priv->rps.autoenable_work,
6848 round_jiffies_up_relative(HZ)))
6849 intel_runtime_pm_get_noresume(dev_priv);
6853 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
6856 * On Ibex Peak and Cougar Point, we need to disable clock
6857 * gating for the panel power sequencer or it will fail to
6858 * start up when no ports are active.
6860 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6863 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
6864 {
6865 enum pipe pipe;
6867 for_each_pipe(dev_priv, pipe) {
6868 I915_WRITE(DSPCNTR(pipe),
6869 I915_READ(DSPCNTR(pipe)) |
6870 DISPPLANE_TRICKLE_FEED_DISABLE);
6872 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6873 POSTING_READ(DSPSURF(pipe));
6877 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
6879 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6880 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6881 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6884 * Don't touch WM1S_LP_EN here.
6885 * Doing so could cause underruns.
6889 static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
6891 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6893 /*
6894 * Required for FBC
6895 * WaFbcDisableDpfcClockGating:ilk
6896 */
6897 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6898 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6899 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6901 I915_WRITE(PCH_3DCGDIS0,
6902 MARIUNIT_CLOCK_GATE_DISABLE |
6903 SVSMUNIT_CLOCK_GATE_DISABLE);
6904 I915_WRITE(PCH_3DCGDIS1,
6905 VFMUNIT_CLOCK_GATE_DISABLE);
6908 * According to the spec the following bits should be set in
6909 * order to enable memory self-refresh
6910 * The bit 22/21 of 0x42004
6911 * The bit 5 of 0x42020
6912 * The bit 15 of 0x45000
6914 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6915 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6916 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6917 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6918 I915_WRITE(DISP_ARB_CTL,
6919 (I915_READ(DISP_ARB_CTL) |
6920 DISP_TRICKLE_FEED_DISABLE));
6922 ilk_init_lp_watermarks(dev_priv);
6925 * Based on the document from hardware guys the following bits
6926 * should be set unconditionally in order to enable FBC.
6927 * The bit 22 of 0x42000
6928 * The bit 22 of 0x42004
6929 * The bit 7,8,9 of 0x42020.
6931 if (IS_IRONLAKE_M(dev_priv)) {
6932 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6933 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6934 I915_READ(ILK_DISPLAY_CHICKEN1) |
6935 ILK_FBCQ_DIS);
6936 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6937 I915_READ(ILK_DISPLAY_CHICKEN2) |
6938 ILK_DPARB_GATE);
6939 }
6941 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6943 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6944 I915_READ(ILK_DISPLAY_CHICKEN2) |
6945 ILK_ELPIN_409_SELECT);
6946 I915_WRITE(_3D_CHICKEN2,
6947 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6948 _3D_CHICKEN2_WM_READ_PIPELINED);
6950 /* WaDisableRenderCachePipelinedFlush:ilk */
6951 I915_WRITE(CACHE_MODE_0,
6952 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6954 /* WaDisable_RenderCache_OperationalFlush:ilk */
6955 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6957 g4x_disable_trickle_feed(dev_priv);
6959 ibx_init_clock_gating(dev_priv);
6962 static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
6963 {
6964 int pipe;
6965 uint32_t val;
6967 /*
6968 * On Ibex Peak and Cougar Point, we need to disable clock
6969 * gating for the panel power sequencer or it will fail to
6970 * start up when no ports are active.
6971 */
6972 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6973 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6974 PCH_CPUNIT_CLOCK_GATE_DISABLE);
6975 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6976 DPLS_EDP_PPS_FIX_DIS);
6977 /* The below fixes the weird display corruption, a few pixels shifted
6978 * downward, on (only) LVDS of some HP laptops with IVY.
6980 for_each_pipe(dev_priv, pipe) {
6981 val = I915_READ(TRANS_CHICKEN2(pipe));
6982 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6983 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6984 if (dev_priv->vbt.fdi_rx_polarity_inverted)
6985 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6986 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6987 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6988 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6989 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6990 }
6991 /* WADP0ClockGatingDisable */
6992 for_each_pipe(dev_priv, pipe) {
6993 I915_WRITE(TRANS_CHICKEN1(pipe),
6994 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6995 }
6996 }
6998 static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
6999 {
7000 uint32_t tmp;
7002 tmp = I915_READ(MCH_SSKPD);
7003 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
7004 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
7005 tmp);
7006 }
7008 static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
7010 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
7012 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
7014 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7015 I915_READ(ILK_DISPLAY_CHICKEN2) |
7016 ILK_ELPIN_409_SELECT);
7018 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
7019 I915_WRITE(_3D_CHICKEN,
7020 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
7022 /* WaDisable_RenderCache_OperationalFlush:snb */
7023 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7025 /*
7026 * BSpec recommends 8x4 when MSAA is used,
7027 * however in practice 16x4 seems fastest.
7028 *
7029 * Note that PS/WM thread counts depend on the WIZ hashing
7030 * disable bit, which we don't touch here, but it's good
7031 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7032 */
7033 I915_WRITE(GEN6_GT_MODE,
7034 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7036 ilk_init_lp_watermarks(dev_priv);
7038 I915_WRITE(CACHE_MODE_0,
7039 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
7041 I915_WRITE(GEN6_UCGCTL1,
7042 I915_READ(GEN6_UCGCTL1) |
7043 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
7044 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7046 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
7047 * gating disable must be set. Failure to set it results in
7048 * flickering pixels due to Z write ordering failures after
7049 * some amount of runtime in the Mesa "fire" demo, and Unigine
7050 * Sanctuary and Tropics, and apparently anything else with
7051 * alpha test or pixel discard.
7053 * According to the spec, bit 11 (RCCUNIT) must also be set,
7054 * but we didn't debug actual testcases to find it out.
7056 * WaDisableRCCUnitClockGating:snb
7057 * WaDisableRCPBUnitClockGating:snb
7059 I915_WRITE(GEN6_UCGCTL2,
7060 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
7061 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
7063 /* WaStripsFansDisableFastClipPerformanceFix:snb */
7064 I915_WRITE(_3D_CHICKEN3,
7065 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
7069 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
7070 * 3DSTATE_SF number of SF output attributes is more than 16."
7072 I915_WRITE(_3D_CHICKEN3,
7073 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
7076 * According to the spec the following bits should be
7077 * set in order to enable memory self-refresh and fbc:
7078 * The bit21 and bit22 of 0x42000
7079 * The bit21 and bit22 of 0x42004
7080 * The bit5 and bit7 of 0x42020
7081 * The bit14 of 0x70180
7082 * The bit14 of 0x71180
7084 * WaFbcAsynchFlipDisableFbcQueue:snb
7086 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7087 I915_READ(ILK_DISPLAY_CHICKEN1) |
7088 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7089 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7090 I915_READ(ILK_DISPLAY_CHICKEN2) |
7091 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7092 I915_WRITE(ILK_DSPCLK_GATE_D,
7093 I915_READ(ILK_DSPCLK_GATE_D) |
7094 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
7095 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
7097 g4x_disable_trickle_feed(dev_priv);
7099 cpt_init_clock_gating(dev_priv);
7101 gen6_check_mch_setup(dev_priv);
7104 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
7106 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
7109 * WaVSThreadDispatchOverride:ivb,vlv
7111 * This actually overrides the dispatch
7112 * mode for all thread types.
7114 reg &= ~GEN7_FF_SCHED_MASK;
7115 reg |= GEN7_FF_TS_SCHED_HW;
7116 reg |= GEN7_FF_VS_SCHED_HW;
7117 reg |= GEN7_FF_DS_SCHED_HW;
7119 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
7122 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
7125 * TODO: this bit should only be enabled when really needed, then
7126 * disabled when not needed anymore in order to save power.
7128 if (HAS_PCH_LPT_LP(dev_priv))
7129 I915_WRITE(SOUTH_DSPCLK_GATE_D,
7130 I915_READ(SOUTH_DSPCLK_GATE_D) |
7131 PCH_LP_PARTITION_LEVEL_DISABLE);
7133 /* WADPOClockGatingDisable:hsw */
7134 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7135 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7136 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7139 static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
7141 if (HAS_PCH_LPT_LP(dev_priv)) {
7142 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
7144 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7145 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7149 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7150 int general_prio_credits,
7151 int high_prio_credits)
7152 {
7153 u32 misccpctl;
7155 /* WaTempDisableDOPClkGating:bdw */
7156 misccpctl = I915_READ(GEN7_MISCCPCTL);
7157 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7159 I915_WRITE(GEN8_L3SQCREG1,
7160 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7161 L3_HIGH_PRIO_CREDITS(high_prio_credits));
7163 /*
7164 * Wait at least 100 clocks before re-enabling clock gating.
7165 * See the definition of L3SQCREG1 in BSpec.
7166 */
7167 POSTING_READ(GEN8_L3SQCREG1);
7168 udelay(1);
7169 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
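/*
 * For example, the WaProgramL3SqcReg1Default:bdw call below,
 * gen8_set_l3sqc_credits(dev_priv, 30, 2), ends up writing
 * L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2) to GEN8_L3SQCREG1,
 * while CHV uses 38/2 per the BSpec LSQC setting recommendations.
 */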
7172 static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
7174 gen9_init_clock_gating(dev_priv);
7176 /* WaDisableSDEUnitClockGating:kbl */
7177 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7178 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7179 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7181 /* WaDisableGamClockGating:kbl */
7182 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7183 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7184 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7186 /* WaFbcNukeOnHostModify:kbl */
7187 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7188 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7191 static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
7193 gen9_init_clock_gating(dev_priv);
7195 /* WAC6entrylatency:skl */
7196 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7197 FBC_LLC_FULLY_OPEN);
7199 /* WaFbcNukeOnHostModify:skl */
7200 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7201 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7204 static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
7205 {
7206 enum pipe pipe;
7208 ilk_init_lp_watermarks(dev_priv);
7210 /* WaSwitchSolVfFArbitrationPriority:bdw */
7211 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7213 /* WaPsrDPAMaskVBlankInSRD:bdw */
7214 I915_WRITE(CHICKEN_PAR1_1,
7215 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7217 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7218 for_each_pipe(dev_priv, pipe) {
7219 I915_WRITE(CHICKEN_PIPESL_1(pipe),
7220 I915_READ(CHICKEN_PIPESL_1(pipe)) |
7221 BDW_DPRS_MASK_VBLANK_SRD);
7224 /* WaVSRefCountFullforceMissDisable:bdw */
7225 /* WaDSRefCountFullforceMissDisable:bdw */
7226 I915_WRITE(GEN7_FF_THREAD_MODE,
7227 I915_READ(GEN7_FF_THREAD_MODE) &
7228 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7230 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7231 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7233 /* WaDisableSDEUnitClockGating:bdw */
7234 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7235 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7237 /* WaProgramL3SqcReg1Default:bdw */
7238 gen8_set_l3sqc_credits(dev_priv, 30, 2);
7241 * WaGttCachingOffByDefault:bdw
7242 * GTT cache may not work with big pages, so if those
7243 * are ever enabled GTT cache may need to be disabled.
7245 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7247 /* WaKVMNotificationOnConfigChange:bdw */
7248 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7249 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7251 lpt_init_clock_gating(dev_priv);
7253 /* WaDisableDopClockGating:bdw
7255 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
7256 * clock gating.
7257 */
7258 I915_WRITE(GEN6_UCGCTL1,
7259 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
7262 static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
7264 ilk_init_lp_watermarks(dev_priv);
7266 /* L3 caching of data atomics doesn't work -- disable it. */
7267 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
7268 I915_WRITE(HSW_ROW_CHICKEN3,
7269 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
7271 /* This is required by WaCatErrorRejectionIssue:hsw */
7272 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7273 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7274 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7276 /* WaVSRefCountFullforceMissDisable:hsw */
7277 I915_WRITE(GEN7_FF_THREAD_MODE,
7278 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7280 /* WaDisable_RenderCache_OperationalFlush:hsw */
7281 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7283 /* enable HiZ Raw Stall Optimization */
7284 I915_WRITE(CACHE_MODE_0_GEN7,
7285 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7287 /* WaDisable4x2SubspanOptimization:hsw */
7288 I915_WRITE(CACHE_MODE_1,
7289 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7292 * BSpec recommends 8x4 when MSAA is used,
7293 * however in practice 16x4 seems fastest.
7295 * Note that PS/WM thread counts depend on the WIZ hashing
7296 * disable bit, which we don't touch here, but it's good
7297 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7299 I915_WRITE(GEN7_GT_MODE,
7300 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7302 /* WaSampleCChickenBitEnable:hsw */
7303 I915_WRITE(HALF_SLICE_CHICKEN3,
7304 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7306 /* WaSwitchSolVfFArbitrationPriority:hsw */
7307 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7309 /* WaRsPkgCStateDisplayPMReq:hsw */
7310 I915_WRITE(CHICKEN_PAR1_1,
7311 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7313 lpt_init_clock_gating(dev_priv);
7316 static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
7317 {
7318 uint32_t snpcr;
7320 ilk_init_lp_watermarks(dev_priv);
7322 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7324 /* WaDisableEarlyCull:ivb */
7325 I915_WRITE(_3D_CHICKEN3,
7326 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7328 /* WaDisableBackToBackFlipFix:ivb */
7329 I915_WRITE(IVB_CHICKEN3,
7330 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7331 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7333 /* WaDisablePSDDualDispatchEnable:ivb */
7334 if (IS_IVB_GT1(dev_priv))
7335 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7336 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7338 /* WaDisable_RenderCache_OperationalFlush:ivb */
7339 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7341 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7342 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7343 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7345 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7346 I915_WRITE(GEN7_L3CNTLREG1,
7347 GEN7_WA_FOR_GEN7_L3_CONTROL);
7348 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7349 GEN7_WA_L3_CHICKEN_MODE);
7350 if (IS_IVB_GT1(dev_priv))
7351 I915_WRITE(GEN7_ROW_CHICKEN2,
7352 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7353 else {
7354 /* must write both registers */
7355 I915_WRITE(GEN7_ROW_CHICKEN2,
7356 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7357 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7358 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7359 }
7361 /* WaForceL3Serialization:ivb */
7362 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7363 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7366 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7367 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
7369 I915_WRITE(GEN6_UCGCTL2,
7370 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7372 /* This is required by WaCatErrorRejectionIssue:ivb */
7373 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7374 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7375 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7377 g4x_disable_trickle_feed(dev_priv);
7379 gen7_setup_fixed_func_scheduler(dev_priv);
7381 if (0) { /* causes HiZ corruption on ivb:gt1 */
7382 /* enable HiZ Raw Stall Optimization */
7383 I915_WRITE(CACHE_MODE_0_GEN7,
7384 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7385 }
7387 /* WaDisable4x2SubspanOptimization:ivb */
7388 I915_WRITE(CACHE_MODE_1,
7389 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7392 * BSpec recommends 8x4 when MSAA is used,
7393 * however in practice 16x4 seems fastest.
7395 * Note that PS/WM thread counts depend on the WIZ hashing
7396 * disable bit, which we don't touch here, but it's good
7397 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7399 I915_WRITE(GEN7_GT_MODE,
7400 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7402 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7403 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7404 snpcr |= GEN6_MBC_SNPCR_MED;
7405 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7407 if (!HAS_PCH_NOP(dev_priv))
7408 cpt_init_clock_gating(dev_priv);
7410 gen6_check_mch_setup(dev_priv);
7413 static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
7415 /* WaDisableEarlyCull:vlv */
7416 I915_WRITE(_3D_CHICKEN3,
7417 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7419 /* WaDisableBackToBackFlipFix:vlv */
7420 I915_WRITE(IVB_CHICKEN3,
7421 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7422 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7424 /* WaPsdDispatchEnable:vlv */
7425 /* WaDisablePSDDualDispatchEnable:vlv */
7426 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7427 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7428 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7430 /* WaDisable_RenderCache_OperationalFlush:vlv */
7431 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7433 /* WaForceL3Serialization:vlv */
7434 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7435 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7437 /* WaDisableDopClockGating:vlv */
7438 I915_WRITE(GEN7_ROW_CHICKEN2,
7439 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7441 /* This is required by WaCatErrorRejectionIssue:vlv */
7442 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7443 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7444 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7446 gen7_setup_fixed_func_scheduler(dev_priv);
7449 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7450 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
7452 I915_WRITE(GEN6_UCGCTL2,
7453 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7455 /* WaDisableL3Bank2xClockGate:vlv
7456 * Disabling L3 clock gating- MMIO 940c[25] = 1
7457 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7458 I915_WRITE(GEN7_UCGCTL4,
7459 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7462 * BSpec says this must be set, even though
7463 * WaDisable4x2SubspanOptimization isn't listed for VLV.
7465 I915_WRITE(CACHE_MODE_1,
7466 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7469 * BSpec recommends 8x4 when MSAA is used,
7470 * however in practice 16x4 seems fastest.
7472 * Note that PS/WM thread counts depend on the WIZ hashing
7473 * disable bit, which we don't touch here, but it's good
7474 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7476 I915_WRITE(GEN7_GT_MODE,
7477 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7480 * WaIncreaseL3CreditsForVLVB0:vlv
7481 * This is the hardware default actually.
7483 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7485 /*
7486 * WaDisableVLVClockGating_VBIIssue:vlv
7487 * Disable clock gating on the GCFG unit to prevent a delay
7488 * in the reporting of vblank events.
7489 */
7490 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7493 static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
7495 /* WaVSRefCountFullforceMissDisable:chv */
7496 /* WaDSRefCountFullforceMissDisable:chv */
7497 I915_WRITE(GEN7_FF_THREAD_MODE,
7498 I915_READ(GEN7_FF_THREAD_MODE) &
7499 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7501 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7502 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7503 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7505 /* WaDisableCSUnitClockGating:chv */
7506 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7507 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7509 /* WaDisableSDEUnitClockGating:chv */
7510 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7511 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7514 * WaProgramL3SqcReg1Default:chv
7515 * See gfxspecs/Related Documents/Performance Guide/
7516 * LSQC Setting Recommendations.
7518 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7521 * GTT cache may not work with big pages, so if those
7522 * are ever enabled GTT cache may need to be disabled.
7524 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7527 static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
7529 uint32_t dspclk_gate;
7531 I915_WRITE(RENCLK_GATE_D1, 0);
7532 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7533 GS_UNIT_CLOCK_GATE_DISABLE |
7534 CL_UNIT_CLOCK_GATE_DISABLE);
7535 I915_WRITE(RAMCLK_GATE_D, 0);
7536 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7537 OVRUNIT_CLOCK_GATE_DISABLE |
7538 OVCUNIT_CLOCK_GATE_DISABLE;
7539 if (IS_GM45(dev_priv))
7540 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7541 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7543 /* WaDisableRenderCachePipelinedFlush */
7544 I915_WRITE(CACHE_MODE_0,
7545 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7547 /* WaDisable_RenderCache_OperationalFlush:g4x */
7548 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7550 g4x_disable_trickle_feed(dev_priv);
7553 static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
7555 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7556 I915_WRITE(RENCLK_GATE_D2, 0);
7557 I915_WRITE(DSPCLK_GATE_D, 0);
7558 I915_WRITE(RAMCLK_GATE_D, 0);
7559 I915_WRITE16(DEUC, 0);
7560 I915_WRITE(MI_ARB_STATE,
7561 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7563 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7564 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7567 static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
7569 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7570 I965_RCC_CLOCK_GATE_DISABLE |
7571 I965_RCPB_CLOCK_GATE_DISABLE |
7572 I965_ISC_CLOCK_GATE_DISABLE |
7573 I965_FBC_CLOCK_GATE_DISABLE);
7574 I915_WRITE(RENCLK_GATE_D2, 0);
7575 I915_WRITE(MI_ARB_STATE,
7576 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7578 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7579 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7582 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
7584 u32 dstate = I915_READ(D_STATE);
7586 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7587 DSTATE_DOT_CLOCK_GATING;
7588 I915_WRITE(D_STATE, dstate);
7590 if (IS_PINEVIEW(dev_priv))
7591 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7593 /* IIR "flip pending" means done if this bit is set */
7594 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7596 /* interrupts should cause a wake up from C3 */
7597 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7599 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7600 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7602 I915_WRITE(MI_ARB_STATE,
7603 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7606 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
7608 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7610 /* interrupts should cause a wake up from C3 */
7611 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7612 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7614 I915_WRITE(MEM_MODE,
7615 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7618 static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
7620 I915_WRITE(MEM_MODE,
7621 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7622 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7625 void intel_init_clock_gating(struct drm_i915_private *dev_priv)
7627 dev_priv->display.init_clock_gating(dev_priv);
7630 void intel_suspend_hw(struct drm_i915_private *dev_priv)
7632 if (HAS_PCH_LPT(dev_priv))
7633 lpt_suspend_hw(dev_priv);
7636 static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
7638 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
7641 /**
7642 * intel_init_clock_gating_hooks - setup the clock gating hooks
7643 * @dev_priv: device private
7644 *
7645 * Setup the hooks that configure which clocks of a given platform can be
7646 * gated and also apply various GT and display specific workarounds for these
7647 * platforms. Note that some GT specific workarounds are applied separately
7648 * when GPU contexts or batchbuffers start their execution.
7649 */
7650 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7652 if (IS_SKYLAKE(dev_priv))
7653 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7654 else if (IS_KABYLAKE(dev_priv))
7655 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7656 else if (IS_BROXTON(dev_priv))
7657 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7658 else if (IS_GEMINILAKE(dev_priv))
7659 dev_priv->display.init_clock_gating = glk_init_clock_gating;
7660 else if (IS_BROADWELL(dev_priv))
7661 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7662 else if (IS_CHERRYVIEW(dev_priv))
7663 dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
7664 else if (IS_HASWELL(dev_priv))
7665 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7666 else if (IS_IVYBRIDGE(dev_priv))
7667 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7668 else if (IS_VALLEYVIEW(dev_priv))
7669 dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
7670 else if (IS_GEN6(dev_priv))
7671 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7672 else if (IS_GEN5(dev_priv))
7673 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7674 else if (IS_G4X(dev_priv))
7675 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7676 else if (IS_I965GM(dev_priv))
7677 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7678 else if (IS_I965G(dev_priv))
7679 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7680 else if (IS_GEN3(dev_priv))
7681 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7682 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7683 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7684 else if (IS_GEN2(dev_priv))
7685 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7686 else {
7687 MISSING_CASE(INTEL_DEVID(dev_priv));
7688 dev_priv->display.init_clock_gating = nop_init_clock_gating;
7689 }
7690 }
7692 /* Set up chip specific power management-related functions */
7693 void intel_init_pm(struct drm_i915_private *dev_priv)
7695 intel_fbc_init(dev_priv);
7697 /* For cxsr */
7698 if (IS_PINEVIEW(dev_priv))
7699 i915_pineview_get_mem_freq(dev_priv);
7700 else if (IS_GEN5(dev_priv))
7701 i915_ironlake_get_mem_freq(dev_priv);
7703 /* For FIFO watermark updates */
7704 if (INTEL_GEN(dev_priv) >= 9) {
7705 skl_setup_wm_latency(dev_priv);
7706 dev_priv->display.initial_watermarks = skl_initial_wm;
7707 dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
7708 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7709 } else if (HAS_PCH_SPLIT(dev_priv)) {
7710 ilk_setup_wm_latency(dev_priv);
7712 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
7713 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7714 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
7715 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7716 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7717 dev_priv->display.compute_intermediate_wm =
7718 ilk_compute_intermediate_wm;
7719 dev_priv->display.initial_watermarks =
7720 ilk_initial_watermarks;
7721 dev_priv->display.optimize_watermarks =
7722 ilk_optimize_watermarks;
7724 DRM_DEBUG_KMS("Failed to read display plane latency. "
7727 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7728 vlv_setup_wm_latency(dev_priv);
7729 dev_priv->display.update_wm = vlv_update_wm;
7730 } else if (IS_PINEVIEW(dev_priv)) {
7731 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
7732 dev_priv->is_ddr3,
7733 dev_priv->fsb_freq,
7734 dev_priv->mem_freq)) {
7735 DRM_INFO("failed to find known CxSR latency "
7736 "(found ddr%s fsb freq %d, mem freq %d), "
7738 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7739 dev_priv->fsb_freq, dev_priv->mem_freq);
7740 /* Disable CxSR and never update its watermark again */
7741 intel_set_memory_cxsr(dev_priv, false);
7742 dev_priv->display.update_wm = NULL;
7743 } else
7744 dev_priv->display.update_wm = pineview_update_wm;
7745 } else if (IS_G4X(dev_priv)) {
7746 dev_priv->display.update_wm = g4x_update_wm;
7747 } else if (IS_GEN4(dev_priv)) {
7748 dev_priv->display.update_wm = i965_update_wm;
7749 } else if (IS_GEN3(dev_priv)) {
7750 dev_priv->display.update_wm = i9xx_update_wm;
7751 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7752 } else if (IS_GEN2(dev_priv)) {
7753 if (INTEL_INFO(dev_priv)->num_pipes == 1) {
7754 dev_priv->display.update_wm = i845_update_wm;
7755 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7756 } else {
7757 dev_priv->display.update_wm = i9xx_update_wm;
7758 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7759 }
7761 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7765 static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
7766 {
7767 uint32_t flags =
7768 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7770 switch (flags) {
7771 case GEN6_PCODE_SUCCESS:
7772 return 0;
7773 case GEN6_PCODE_UNIMPLEMENTED_CMD:
7774 case GEN6_PCODE_ILLEGAL_CMD:
7775 return -ENXIO;
7776 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7777 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7778 return -EOVERFLOW;
7779 case GEN6_PCODE_TIMEOUT:
7780 return -ETIMEDOUT;
7781 default:
7782 MISSING_CASE(flags);
7783 return 0;
7784 }
7785 }
7787 static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
7788 {
7789 uint32_t flags =
7790 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7792 switch (flags) {
7793 case GEN6_PCODE_SUCCESS:
7794 return 0;
7795 case GEN6_PCODE_ILLEGAL_CMD:
7796 return -ENXIO;
7797 case GEN7_PCODE_TIMEOUT:
7798 return -ETIMEDOUT;
7799 case GEN7_PCODE_ILLEGAL_DATA:
7800 return -EINVAL;
7801 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7802 return -EOVERFLOW;
7803 default:
7804 MISSING_CASE(flags);
7805 return 0;
7806 }
7807 }
7809 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7810 {
7811 int status;
7813 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7815 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7816 * use the fw I915_READ variants to reduce the amount of work
7817 * required when reading/writing.
7820 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7821 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7825 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
7826 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7827 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7829 if (intel_wait_for_register_fw(dev_priv,
7830 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7831 500)) {
7832 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7833 return -ETIMEDOUT;
7834 }
7836 *val = I915_READ_FW(GEN6_PCODE_DATA);
7837 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7839 if (INTEL_GEN(dev_priv) > 6)
7840 status = gen7_check_mailbox_status(dev_priv);
7842 status = gen6_check_mailbox_status(dev_priv);
7845 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
7853 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7854 u32 mbox, u32 val)
7855 {
7856 int status;
7858 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7860 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7861 * use the fw I915_READ variants to reduce the amount of work
7862 * required when reading/writing.
7865 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7866 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7870 I915_WRITE_FW(GEN6_PCODE_DATA, val);
7871 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7872 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7874 if (intel_wait_for_register_fw(dev_priv,
7875 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7876 500)) {
7877 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7878 return -ETIMEDOUT;
7879 }
7881 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7883 if (INTEL_GEN(dev_priv) > 6)
7884 status = gen7_check_mailbox_status(dev_priv);
7886 status = gen6_check_mailbox_status(dev_priv);
7889 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
7897 static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
7898 u32 request, u32 reply_mask, u32 reply,
7899 u32 *status)
7900 {
7901 u32 val = request;
7903 *status = sandybridge_pcode_read(dev_priv, mbox, &val);
7905 return *status || ((val & reply_mask) == reply);
7908 /**
7909 * skl_pcode_request - send PCODE request until acknowledgment
7910 * @dev_priv: device private
7911 * @mbox: PCODE mailbox ID the request is targeted for
7912 * @request: request ID
7913 * @reply_mask: mask used to check for request acknowledgment
7914 * @reply: value used to check for request acknowledgment
7915 * @timeout_base_ms: timeout for polling with preemption enabled
7916 *
7917 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
7918 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
7919 * The request is acknowledged once the PCODE reply dword equals @reply after
7920 * applying @reply_mask. Polling is first attempted with preemption enabled
7921 * for @timeout_base_ms and, if this times out, for another 50 ms with
7922 * preemption disabled.
7923 *
7924 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
7925 * other error as reported by PCODE.
7926 */
7927 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
7928 u32 reply_mask, u32 reply, int timeout_base_ms)
7929 {
7930 u32 status;
7931 int ret;
7933 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7935 #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
7936 &status)
7938 /*
7939 * Prime the PCODE by doing a request first. Normally it guarantees
7940 * that a subsequent request, at most @timeout_base_ms later, succeeds.
7941 * _wait_for() doesn't guarantee when its passed condition is evaluated
7942 * first, so send the first request explicitly.
7943 */
7944 if (COND) {
7945 ret = 0;
7946 goto out;
7947 }
7948 ret = _wait_for(COND, timeout_base_ms * 1000, 10);
7949 if (!ret)
7950 goto out;
7952 /*
7953 * The above can time out if the number of requests was low (2 in the
7954 * worst case) _and_ PCODE was busy for some reason even after a
7955 * (queued) request and @timeout_base_ms delay. As a workaround retry
7956 * the poll with preemption disabled to maximize the number of
7957 * requests. Increase the timeout from @timeout_base_ms to 50ms to
7958 * account for interrupts that could reduce the number of these
7959 * requests, and for any quirks of the PCODE firmware that delays
7960 * the request completion.
7961 */
7962 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
7963 WARN_ON_ONCE(timeout_base_ms > 3);
7964 preempt_disable();
7965 ret = wait_for_atomic(COND, 50);
7966 preempt_enable();
7968 out:
7969 return ret ? ret : status;
7970 #undef COND
7971 }
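/*
 * Typical call pattern (sketch; the mailbox and reply values here are
 * placeholders, not real PCODE commands):
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = skl_pcode_request(dev_priv, mbox, request,
 *				reply_mask, reply, 3);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * timeout_base_ms is kept at 3 ms or less so the atomic retry above stays
 * within the WARN_ON_ONCE(timeout_base_ms > 3) bound.
 */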
7973 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7974 {
7975 /*
7976 * N = val - 0xb7
7977 * Slow = Fast = GPLL ref * N
7978 */
7979 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
7980 }
7982 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7984 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
7987 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7988 {
7989 /*
7990 * N = val / 2
7991 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
7992 */
7993 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
7994 }
7996 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7998 /* CHV needs even values */
7999 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
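/*
 * Worked round trip for the CHV helpers (illustrative: assumes a 19.2 MHz
 * GPLL reference, i.e. gpll_ref_freq == 19200): chv_gpu_freq(dev_priv, 52)
 * = round(19200 * 52 / 4000) = 250 MHz, and chv_freq_opcode(dev_priv, 250)
 * = round(2 * 1000 * 250 / 19200) * 2 = 52. The final "* 2" keeps CHV
 * opcodes even, which the "Odd GPU freq values" WARN_ONCE during init
 * checks for.
 */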
8002 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
8004 if (IS_GEN9(dev_priv))
8005 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
8006 GEN9_FREQ_SCALER);
8007 else if (IS_CHERRYVIEW(dev_priv))
8008 return chv_gpu_freq(dev_priv, val);
8009 else if (IS_VALLEYVIEW(dev_priv))
8010 return byt_gpu_freq(dev_priv, val);
8011 else
8012 return val * GT_FREQUENCY_MULTIPLIER;
8013 }
8015 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
8017 if (IS_GEN9(dev_priv))
8018 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
8019 GT_FREQUENCY_MULTIPLIER);
8020 else if (IS_CHERRYVIEW(dev_priv))
8021 return chv_freq_opcode(dev_priv, val);
8022 else if (IS_VALLEYVIEW(dev_priv))
8023 return byt_freq_opcode(dev_priv, val);
8024 else
8025 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
8026 }
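/*
 * Example conversions (GT_FREQUENCY_MULTIPLIER is 50, GEN9_FREQ_SCALER is
 * 3): on gen9 the ratio unit is 50/3 MHz, so intel_gpu_freq(dev_priv, 27)
 * = round(27 * 50 / 3) = 450 MHz and intel_freq_opcode(dev_priv, 450) =
 * round(450 * 3 / 50) = 27; on gen6-gen8 opcode 9 is simply 9 * 50 =
 * 450 MHz.
 */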
8028 struct request_boost {
8029 struct work_struct work;
8030 struct drm_i915_gem_request *req;
8031 };
8033 static void __intel_rps_boost_work(struct work_struct *work)
8035 struct request_boost *boost = container_of(work, struct request_boost, work);
8036 struct drm_i915_gem_request *req = boost->req;
8038 if (!i915_gem_request_completed(req))
8039 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
8041 i915_gem_request_put(req);
8045 void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
8047 struct request_boost *boost;
8049 if (req == NULL || INTEL_GEN(req->i915) < 6)
8050 return;
8052 if (i915_gem_request_completed(req))
8053 return;
8055 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
8056 if (boost == NULL)
8057 return;
8059 boost->req = i915_gem_request_get(req);
8061 INIT_WORK(&boost->work, __intel_rps_boost_work);
8062 queue_work(req->i915->wq, &boost->work);
8065 void intel_pm_setup(struct drm_i915_private *dev_priv)
8067 mutex_init(&dev_priv->rps.hw_lock);
8068 spin_lock_init(&dev_priv->rps.client_lock);
8070 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
8071 __intel_autoenable_gt_powersave);
8072 INIT_LIST_HEAD(&dev_priv->rps.clients);
8074 dev_priv->pm.suspended = false;
8075 atomic_set(&dev_priv->pm.wakeref_count, 0);