2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
31 * Display PLLs used for driving outputs vary by platform. While some have
32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33 * from a pool. In the latter scenario, it is possible that multiple pipes
34 * share a PLL if their configurations match.
36 * This file provides an abstraction over display PLLs. The function
37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
38 * users of a PLL are tracked and that tracking is integrated with the atomic
39 modeset interface. During an atomic operation, required PLLs can be reserved
40 * for a given CRTC and encoder configuration by calling
41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42 * with intel_release_shared_dplls().
43 * Changes to the users are first staged in the atomic state, and then made
44 * effective by calling intel_shared_dpll_swap_state() during the atomic
/*
 * Snapshot the software state of every shared DPLL on the device into the
 * caller-provided @shared_dpll array, indexed by PLL id.  One plain struct
 * copy per PLL; serialization is presumably the caller's job (the caller
 * asserts connection_mutex) — NOTE(review): confirm.
 */
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 struct intel_shared_dpll_state *shared_dpll)
54 /* Copy shared dpll state */
55 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
58 shared_dpll[i] = pll->state;
62 static struct intel_shared_dpll_state *
/*
 * Return the per-atomic-state staged copy of all shared-DPLL states,
 * duplicating the current driver-tracked state into the atomic state on
 * first access (lazily, guarded by state->dpll_set).
 */
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
65 struct intel_atomic_state *state = to_intel_atomic_state(s);
/* Must be called with the modeset connection_mutex held. */
67 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex))
69 if (!state->dpll_set) {
70 state->dpll_set = true;
72 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
76 return state->shared_dpll;
80 * intel_get_shared_dpll_by_id - get a DPLL given its id
81 * @dev_priv: i915 device instance
85 * A pointer to the DPLL with @id
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 enum intel_dpll_id id)
/* Direct array lookup; @id is not range-checked here — callers must pass a valid id. */
91 return &dev_priv->shared_dplls[id];
95 * intel_get_shared_dpll_id - get the id of a DPLL
96 * @dev_priv: i915 device instance
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 struct intel_shared_dpll *pll)
/*
 * NOTE(review): the upper-bound check compares with '>' against
 * &shared_dplls[num_shared_dpll]; a pointer exactly one past the last valid
 * PLL slips through and would yield an out-of-range id — looks like an
 * off-by-one ('>=' or '[num_shared_dpll - 1]' expected), confirm upstream.
 * Also missing a space before '||'.
 */
106 if (WARN_ON(pll < dev_priv->shared_dplls||
107 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
/* id is simply the array index of @pll within shared_dplls. */
110 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
/*
 * Warn unless the PLL's hardware enable state (read via its get_hw_state
 * hook) matches the expected @state.  A NULL @pll is itself a WARN.
 */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll *pll,
119 struct intel_dpll_hw_state hw_state;
121 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
/* hw_state contents are read back but only the on/off result is checked here. */
124 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 I915_STATE_WARN(cur_state != state,
126 "%s assertion failure (expected %s, current %s)\n",
127 pll->info->name, onoff(state), onoff(cur_state));
131 * intel_prepare_shared_dpll - call a dpll's prepare hook
132 * @crtc_state: CRTC, and its state, which has a shared dpll
134 * This calls the PLL's prepare hook if it has one and if the PLL is not
135 * already enabled. The prepare hook is platform specific.
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
139 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
140 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
143 if (WARN_ON(pll == NULL))
/* dpll_lock serializes all shared-DPLL enable/disable/prepare paths. */
146 mutex_lock(&dev_priv->dpll_lock);
/* A PLL being prepared should already have at least one CRTC reserved on it. */
147 WARN_ON(!pll->state.crtc_mask);
/* Only prepare while no CRTC is actively using the PLL. */
148 if (!pll->active_mask) {
149 DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
151 assert_shared_dpll_disabled(dev_priv, pll);
153 pll->info->funcs->prepare(dev_priv, pll);
155 mutex_unlock(&dev_priv->dpll_lock);
159 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160 * @crtc_state: CRTC, and its state, which has a shared DPLL
162 * Enable the shared DPLL used by @crtc.
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
166 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
167 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 unsigned int old_mask;
172 if (WARN_ON(pll == NULL))
175 mutex_lock(&dev_priv->dpll_lock);
176 old_mask = pll->active_mask;
/*
 * Sanity: the CRTC must have reserved this PLL (crtc_mask set) and must
 * not already be counted as an active user.
 */
178 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 WARN_ON(pll->active_mask & crtc_mask))
/* Mark this CRTC as an active user before touching the hardware. */
182 pll->active_mask |= crtc_mask;
184 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 pll->info->name, pll->active_mask, pll->on,
/* If old_mask was non-zero the PLL is already running; just assert it. */
190 assert_shared_dpll_enabled(dev_priv, pll);
195 DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 pll->info->funcs->enable(dev_priv, pll);
200 mutex_unlock(&dev_priv->dpll_lock);
204 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205 * @crtc_state: CRTC, and its state, which has a shared DPLL
207 * Disable the shared DPLL used by @crtc.
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* Gen < 5 has no PCH, hence no shared DPLL to disable — early out. */
216 /* PCH only available on ILK+ */
217 if (INTEL_GEN(dev_priv) < 5)
223 mutex_lock(&dev_priv->dpll_lock);
/* The CRTC must currently be counted as an active user of this PLL. */
224 if (WARN_ON(!(pll->active_mask & crtc_mask)))
227 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 pll->info->name, pll->active_mask, pll->on,
231 assert_shared_dpll_enabled(dev_priv, pll);
/* Drop this CRTC; only turn the hardware off once no user remains. */
234 pll->active_mask &= ~crtc_mask;
235 if (pll->active_mask)
238 DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 pll->info->funcs->disable(dev_priv, pll);
243 mutex_unlock(&dev_priv->dpll_lock);
246 static struct intel_shared_dpll *
/*
 * Scan PLL ids [range_min, range_max] in the staged atomic dpll state for
 * either (a) an in-use PLL whose programmed hw_state matches @pll_state
 * byte-for-byte (sharing), or (b) failing that, an unused PLL to allocate.
 * Returns a PLL or (presumably) NULL when neither exists — the failure
 * path is elided in this excerpt, confirm.
 */
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 const struct intel_crtc *crtc,
249 const struct intel_dpll_hw_state *pll_state,
250 enum intel_dpll_id range_min,
251 enum intel_dpll_id range_max)
253 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
254 struct intel_shared_dpll *pll, *unused_pll = NULL;
255 struct intel_shared_dpll_state *shared_dpll;
256 enum intel_dpll_id i;
258 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
260 for (i = range_min; i <= range_max; i++) {
261 pll = &dev_priv->shared_dplls[i];
/* crtc_mask == 0 means currently unused — remember it as a fallback. */
263 /* Only want to check enabled timings first */
264 if (shared_dpll[i].crtc_mask == 0) {
/* Whole-struct memcmp: sharing requires an exact hw_state match. */
270 if (memcmp(pll_state,
271 &shared_dpll[i].hw_state,
272 sizeof(*pll_state)) == 0) {
273 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
274 crtc->base.base.id, crtc->base.name,
276 shared_dpll[i].crtc_mask,
282 /* Ok no matching timings, maybe there's a free one? */
284 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
285 crtc->base.base.id, crtc->base.name,
286 unused_pll->info->name);
/*
 * Record @crtc as a user of @pll in the staged atomic dpll state.  The
 * hw_state is only copied in when the PLL had no users yet; subsequent
 * sharers are assumed to have an identical hw_state (checked by
 * intel_find_shared_dpll()).
 */
294 intel_reference_shared_dpll(struct intel_atomic_state *state,
295 const struct intel_crtc *crtc,
296 const struct intel_shared_dpll *pll,
297 const struct intel_dpll_hw_state *pll_state)
299 struct intel_shared_dpll_state *shared_dpll;
300 const enum intel_dpll_id id = pll->info->id;
302 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
304 if (shared_dpll[id].crtc_mask == 0)
305 shared_dpll[id].hw_state = *pll_state;
307 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
308 pipe_name(crtc->pipe));
/* crtc_mask here is keyed by pipe, matching the unreference path below. */
310 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/* Drop @crtc (keyed by pipe) from @pll's user mask in the staged atomic state. */
313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
314 const struct intel_crtc *crtc,
315 const struct intel_shared_dpll *pll)
317 struct intel_shared_dpll_state *shared_dpll;
319 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
320 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
/*
 * Release the CRTC's previously reserved PLL: clear the pointer in the new
 * CRTC state and unreference whatever the old state was using (if anything).
 */
323 static void intel_put_dpll(struct intel_atomic_state *state,
324 struct intel_crtc *crtc)
326 const struct intel_crtc_state *old_crtc_state =
327 intel_atomic_get_old_crtc_state(state, crtc);
328 struct intel_crtc_state *new_crtc_state =
329 intel_atomic_get_new_crtc_state(state, crtc);
331 new_crtc_state->shared_dpll = NULL;
/* Nothing to release if the old state had no PLL. */
333 if (!old_crtc_state->shared_dpll)
336 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
340 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
341 * @state: atomic state
343 * This is the dpll version of drm_atomic_helper_swap_state() since the
344 * helper does not handle driver-specific global state.
346 * For consistency with atomic helpers this function does a complete swap,
347 * i.e. it also puts the current state into @state, even though there is no
348 * need for that at this moment.
350 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
352 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
353 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
354 enum intel_dpll_id i;
/* dpll_set is only true if the state was ever duplicated into @state. */
356 if (!state->dpll_set)
359 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
360 struct intel_shared_dpll *pll =
361 &dev_priv->shared_dplls[i];
/* Full swap: old driver state lands back in @state, per the comment above. */
363 swap(pll->state, shared_dpll[i]);
/*
 * Read back the IBX PCH DPLL registers (DPLL, FP0, FP1) into @hw_state.
 * Returns true iff the PLL's VCO enable bit is set.  Bails out (returning
 * what is presumably false — line elided) if the display power domain
 * cannot be grabbed.
 */
367 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
368 struct intel_shared_dpll *pll,
369 struct intel_dpll_hw_state *hw_state)
371 const enum intel_dpll_id id = pll->info->id;
372 intel_wakeref_t wakeref;
/* Only read if the power well is already up; don't power it on just to probe. */
375 wakeref = intel_display_power_get_if_enabled(dev_priv,
376 POWER_DOMAIN_DISPLAY_CORE);
380 val = I915_READ(PCH_DPLL(id));
381 hw_state->dpll = val;
382 hw_state->fp0 = I915_READ(PCH_FP0(id));
383 hw_state->fp1 = I915_READ(PCH_FP1(id));
385 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
387 return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers from the cached hw_state before enable. */
390 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
391 struct intel_shared_dpll *pll)
393 const enum intel_dpll_id id = pll->info->id;
395 I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
396 I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
/*
 * State-check helper: warn unless at least one PCH reference clock source
 * (SSC, non-spread, or superspread) is enabled in PCH_DREF_CONTROL.
 * Also warns if called on a platform that is neither IBX nor CPT.
 */
399 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
404 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
406 val = I915_READ(PCH_DREF_CONTROL);
407 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
408 DREF_SUPERSPREAD_SOURCE_MASK));
409 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/*
 * Enable an IBX PCH DPLL by writing the cached DPLL value; the refclk must
 * already be running.  The DPLL register is written twice with a posting
 * read in between — per the inline comments, the pixel multiplier only
 * latches once the PLL is enabled and the clocks are stable.
 */
412 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
413 struct intel_shared_dpll *pll)
415 const enum intel_dpll_id id = pll->info->id;
417 /* PCH refclock must be enabled first */
418 ibx_assert_pch_refclk_enabled(dev_priv);
420 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
422 /* Wait for the clocks to stabilize. */
423 POSTING_READ(PCH_DPLL(id));
426 /* The pixel multiplier can only be updated once the
427 * DPLL is enabled and the clocks are stable.
431 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
432 POSTING_READ(PCH_DPLL(id));
/* Disable the PCH DPLL by clearing its control register entirely. */
436 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
437 struct intel_shared_dpll *pll)
439 const enum intel_dpll_id id = pll->info->id;
441 I915_WRITE(PCH_DPLL(id), 0);
442 POSTING_READ(PCH_DPLL(id));
/*
 * Reserve a PCH DPLL for @crtc.  On Ironlake (IBX) the mapping is fixed
 * pipe->PLL; otherwise (presumably CPT) a PLL is searched from the shared
 * pool.  On success the PLL is referenced in the atomic state and stored
 * in crtc_state->shared_dpll.  Failure handling is elided in this excerpt.
 */
446 static bool ibx_get_dpll(struct intel_atomic_state *state,
447 struct intel_crtc *crtc,
448 struct intel_encoder *encoder)
450 struct intel_crtc_state *crtc_state =
451 intel_atomic_get_new_crtc_state(state, crtc);
452 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
453 struct intel_shared_dpll *pll;
454 enum intel_dpll_id i;
456 if (HAS_PCH_IBX(dev_priv)) {
457 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
458 i = (enum intel_dpll_id) crtc->pipe;
459 pll = &dev_priv->shared_dplls[i];
461 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
462 crtc->base.base.id, crtc->base.name,
465 pll = intel_find_shared_dpll(state, crtc,
466 &crtc_state->dpll_hw_state,
474 /* reference the pll */
475 intel_reference_shared_dpll(state, crtc,
476 pll, &crtc_state->dpll_hw_state);
478 crtc_state->shared_dpll = pll;
/* Debug dump of the IBX dpll_hw_state fields (dpll/dpll_md/fp0/fp1). */
483 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
484 const struct intel_dpll_hw_state *hw_state)
486 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
487 "fp0: 0x%x, fp1: 0x%x\n",
/* vtable wiring the IBX PCH DPLL into the shared-DPLL framework. */
494 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
495 .prepare = ibx_pch_dpll_prepare,
496 .enable = ibx_pch_dpll_enable,
497 .disable = ibx_pch_dpll_disable,
498 .get_hw_state = ibx_pch_dpll_get_hw_state,
/* HSW: enable a WRPLL by writing its cached control value, with posting read. */
501 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
502 struct intel_shared_dpll *pll)
504 const enum intel_dpll_id id = pll->info->id;
506 I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
507 POSTING_READ(WRPLL_CTL(id));
/* HSW: enable the (single) SPLL from its cached control value. */
511 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
512 struct intel_shared_dpll *pll)
514 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
515 POSTING_READ(SPLL_CTL);
/* HSW: disable a WRPLL via read-modify-write clearing only the enable bit. */
519 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
520 struct intel_shared_dpll *pll)
522 const enum intel_dpll_id id = pll->info->id;
525 val = I915_READ(WRPLL_CTL(id));
526 I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
527 POSTING_READ(WRPLL_CTL(id));
/* HSW: disable the SPLL via read-modify-write clearing only the enable bit. */
530 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
531 struct intel_shared_dpll *pll)
535 val = I915_READ(SPLL_CTL);
536 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
537 POSTING_READ(SPLL_CTL);
/*
 * Read back WRPLL_CTL into @hw_state under a conditional display power
 * reference; returns true iff the PLL enable bit is set.
 */
540 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
541 struct intel_shared_dpll *pll,
542 struct intel_dpll_hw_state *hw_state)
544 const enum intel_dpll_id id = pll->info->id;
545 intel_wakeref_t wakeref;
548 wakeref = intel_display_power_get_if_enabled(dev_priv,
549 POWER_DOMAIN_DISPLAY_CORE);
553 val = I915_READ(WRPLL_CTL(id));
554 hw_state->wrpll = val;
556 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
558 return val & WRPLL_PLL_ENABLE;
/* SPLL variant of the WRPLL read-back above: single register, no per-id index. */
561 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
562 struct intel_shared_dpll *pll,
563 struct intel_dpll_hw_state *hw_state)
565 intel_wakeref_t wakeref;
568 wakeref = intel_display_power_get_if_enabled(dev_priv,
569 POWER_DOMAIN_DISPLAY_CORE);
573 val = I915_READ(SPLL_CTL);
574 hw_state->spll = val;
576 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
578 return val & SPLL_PLL_ENABLE;
582 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
588 /* Constraints for PLL good behavior */
594 struct hsw_wrpll_rnp {
598 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Candidate evaluator for the HSW WRPLL divider search: given target
 * frequency (in 2 kHz units), a PPM budget and a candidate (r2, n2, p)
 * triple, update *best if the candidate is preferable.  The comparison is
 * done with cross-multiplied u64 arithmetic to stay in integers.
 */
672 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
673 unsigned int r2, unsigned int n2,
675 struct hsw_wrpll_rnp *best)
677 u64 a, b, c, d, diff, diff_best;
679 /* No best (r,n,p) yet */
688 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
692 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
695 * and we would like delta <= budget.
697 * If the discrepancy is above the PPM-based budget, always prefer to
698 * improve upon the previous solution. However, if you're within the
699 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
/* a/b: scaled budgets; c (elided here)/d: scaled deviations, for candidate/best. */
701 a = freq2k * budget * p * r2;
702 b = freq2k * budget * best->p * best->r2;
703 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
704 diff_best = abs_diff(freq2k * best->p * best->r2,
705 LC_FREQ_2K * best->n2);
707 d = 1000000 * diff_best;
709 if (a < c && b < d) {
710 /* If both are above the budget, pick the closer */
711 if (best->p * best->r2 * diff < p * r2 * diff_best) {
716 } else if (a >= c && b < d) {
717 /* If A is below the threshold but B is above it? Update. */
721 } else if (a >= c && b >= d) {
722 /* Both are below the limit, so pick the higher n2/(r2*r2) */
723 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
729 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustive WRPLL divider search for HSW: for every legal r2 (reference
 * divider x2) and n2 (feedback x2), try all post dividers p and keep the
 * best triple per hsw_wrpll_update_rnp().  540 MHz pixel clock bypasses
 * the WRPLL entirely (LC PLL passthrough).  Results are returned through
 * the *_out parameters.
 */
733 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
734 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
738 struct hsw_wrpll_rnp best = { 0, 0, 0 };
/* freq2k: target frequency expressed in 2 kHz units (Hz / 100 / 20... see math above). */
741 freq2k = clock / 100;
743 budget = hsw_wrpll_get_budget_for_freq(clock);
745 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
746 * and directly pass the LC PLL to it. */
747 if (freq2k == 5400000) {
755 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
758 * We want R so that REF_MIN <= Ref <= REF_MAX.
759 * Injecting R2 = 2 * R gives:
760 * REF_MAX * r2 > LC_FREQ * 2 and
761 * REF_MIN * r2 < LC_FREQ * 2
763 * Which means the desired boundaries for r2 are:
764 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
767 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
768 r2 <= LC_FREQ * 2 / REF_MIN;
772 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
774 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
775 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
776 * VCO_MAX * r2 > n2 * LC_FREQ and
777 * VCO_MIN * r2 < n2 * LC_FREQ)
779 * Which means the desired boundaries for n2 are:
780 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
782 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
783 n2 <= VCO_MAX * r2 / LC_FREQ;
786 for (p = P_MIN; p <= P_MAX; p += P_INC)
787 hsw_wrpll_update_rnp(freq2k, budget,
797 static struct intel_shared_dpll *
/*
 * HDMI path: compute WRPLL dividers for the port clock (kHz -> Hz via
 * *1000), encode them into a WRPLL_CTL value in the crtc_state, then
 * look for a matching or free WRPLL (WRPLL1/WRPLL2) in the shared pool.
 */
798 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
799 struct intel_crtc *crtc)
801 struct intel_crtc_state *crtc_state =
802 intel_atomic_get_new_crtc_state(state, crtc);
803 struct intel_shared_dpll *pll;
805 unsigned int p, n2, r2;
807 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
809 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
810 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
811 WRPLL_DIVIDER_POST(p);
813 crtc_state->dpll_hw_state.wrpll = val;
815 pll = intel_find_shared_dpll(state, crtc,
816 &crtc_state->dpll_hw_state,
817 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
825 static struct intel_shared_dpll *
/*
 * DP path: map the link clock onto one of the fixed LCPLL-derived PLLs
 * (810/1350/2700); unknown clocks are rejected with a debug message
 * (return on failure is elided in this excerpt).
 */
826 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
828 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
829 struct intel_shared_dpll *pll;
830 enum intel_dpll_id pll_id;
831 int clock = crtc_state->port_clock;
835 pll_id = DPLL_ID_LCPLL_810;
838 pll_id = DPLL_ID_LCPLL_1350;
841 pll_id = DPLL_ID_LCPLL_2700;
844 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
848 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/*
 * HSW PLL reservation dispatcher: HDMI -> WRPLL, DP -> fixed LCPLL,
 * analog (CRT) -> SPLL at 1350 MHz (port clock must be 2*135000 kHz).
 * dpll_hw_state is zeroed first so the memcmp-based sharing check in
 * intel_find_shared_dpll() compares only the fields set here.
 */
856 static bool hsw_get_dpll(struct intel_atomic_state *state,
857 struct intel_crtc *crtc,
858 struct intel_encoder *encoder)
860 struct intel_crtc_state *crtc_state =
861 intel_atomic_get_new_crtc_state(state, crtc);
862 struct intel_shared_dpll *pll;
864 memset(&crtc_state->dpll_hw_state, 0,
865 sizeof(crtc_state->dpll_hw_state));
867 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
868 pll = hsw_ddi_hdmi_get_dpll(state, crtc);
869 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
870 pll = hsw_ddi_dp_get_dpll(crtc_state);
871 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
872 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
875 crtc_state->dpll_hw_state.spll =
876 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
878 pll = intel_find_shared_dpll(state, crtc,
879 &crtc_state->dpll_hw_state,
880 DPLL_ID_SPLL, DPLL_ID_SPLL);
888 intel_reference_shared_dpll(state, crtc,
889 pll, &crtc_state->dpll_hw_state);
891 crtc_state->shared_dpll = pll;
/* Debug dump of the HSW dpll_hw_state fields (wrpll/spll). */
896 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
897 const struct intel_dpll_hw_state *hw_state)
899 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
900 hw_state->wrpll, hw_state->spll);
/* HSW WRPLL vtable — no .prepare hook; WRPLL programming happens at enable. */
903 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
904 .enable = hsw_ddi_wrpll_enable,
905 .disable = hsw_ddi_wrpll_disable,
906 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
/* HSW SPLL vtable — likewise enable/disable/read-back only. */
909 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
910 .enable = hsw_ddi_spll_enable,
911 .disable = hsw_ddi_spll_disable,
912 .get_hw_state = hsw_ddi_spll_get_hw_state,
/*
 * The fixed LCPLL-derived PLLs (810/1350/2700) are always running; these
 * hooks are intentionally no-ops (bodies elided/empty in the original).
 */
915 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
916 struct intel_shared_dpll *pll)
920 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
921 struct intel_shared_dpll *pll)
925 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
926 struct intel_shared_dpll *pll,
927 struct intel_dpll_hw_state *hw_state)
/* Vtable for the always-on LCPLL pseudo-PLLs above. */
932 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
933 .enable = hsw_ddi_lcpll_enable,
934 .disable = hsw_ddi_lcpll_disable,
935 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
/* Per-PLL register set for SKL: control plus the two config registers. */
938 struct skl_dpll_regs {
939 i915_reg_t ctl, cfgcr1, cfgcr2;
942 /* this array is indexed by the *shared* pll id */
943 static const struct skl_dpll_regs skl_dpll_regs[4] = {
947 /* DPLL 0 doesn't support HDMI mode */
952 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
953 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
958 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
959 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
964 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
965 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Update this PLL's 6-bit field within the shared DPLL_CTRL1 register:
 * clear the HDMI-mode and link-rate bits for @id, then OR in the cached
 * ctrl1 value shifted to the PLL's field (id * 6 bits).
 */
969 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
970 struct intel_shared_dpll *pll)
972 const enum intel_dpll_id id = pll->info->id;
975 val = I915_READ(DPLL_CTRL1);
977 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
979 DPLL_CTRL1_LINK_RATE_MASK(id));
980 val |= pll->state.hw_state.ctrl1 << (id * 6);
982 I915_WRITE(DPLL_CTRL1, val);
983 POSTING_READ(DPLL_CTRL1);
/*
 * Enable an SKL DPLL: program its DPLL_CTRL1 field and CFGCR1/CFGCR2,
 * set the enable bit in the per-PLL control register, then poll
 * DPLL_STATUS for lock (5 ms timeout).
 */
986 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
987 struct intel_shared_dpll *pll)
989 const struct skl_dpll_regs *regs = skl_dpll_regs;
990 const enum intel_dpll_id id = pll->info->id;
992 skl_ddi_pll_write_ctrl1(dev_priv, pll);
994 I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
995 I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
996 POSTING_READ(regs[id].cfgcr1);
997 POSTING_READ(regs[id].cfgcr2);
999 /* the enable bit is always bit 31 */
1000 I915_WRITE(regs[id].ctl,
1001 I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1003 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1004 DRM_ERROR("DPLL %d not locked\n", id);
/* DPLL0 drives CDCLK and is managed elsewhere; only its CTRL1 field is set. */
1007 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1008 struct intel_shared_dpll *pll)
1010 skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Disable an SKL DPLL by clearing bit 31 of its control register. */
1013 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1014 struct intel_shared_dpll *pll)
1016 const struct skl_dpll_regs *regs = skl_dpll_regs;
1017 const enum intel_dpll_id id = pll->info->id;
1019 /* the enable bit is always bit 31 */
1020 I915_WRITE(regs[id].ctl,
1021 I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1022 POSTING_READ(regs[id].ctl);
/* Intentional no-op: DPLL0 must stay on since it drives CDCLK (body empty). */
1025 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1026 struct intel_shared_dpll *pll)
/*
 * Read back an SKL DPLL's state: bail if the enable bit is clear, extract
 * this PLL's 6-bit ctrl1 field, and only read CFGCR1/CFGCR2 when HDMI
 * mode is set (DP-mode values would be stale garbage).
 */
1030 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1031 struct intel_shared_dpll *pll,
1032 struct intel_dpll_hw_state *hw_state)
1035 const struct skl_dpll_regs *regs = skl_dpll_regs;
1036 const enum intel_dpll_id id = pll->info->id;
1037 intel_wakeref_t wakeref;
1040 wakeref = intel_display_power_get_if_enabled(dev_priv,
1041 POWER_DOMAIN_DISPLAY_CORE);
1047 val = I915_READ(regs[id].ctl);
1048 if (!(val & LCPLL_PLL_ENABLE))
1051 val = I915_READ(DPLL_CTRL1);
1052 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1054 /* avoid reading back stale values if HDMI mode is not enabled */
1055 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1056 hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1057 hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1062 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * DPLL0 read-back: it must always be enabled (it drives CDCLK), so a
 * clear enable bit is a WARN.  Only the ctrl1 field is captured — DPLL0
 * has no HDMI mode, hence no CFGCR read-back.
 */
1067 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1068 struct intel_shared_dpll *pll,
1069 struct intel_dpll_hw_state *hw_state)
1071 const struct skl_dpll_regs *regs = skl_dpll_regs;
1072 const enum intel_dpll_id id = pll->info->id;
1073 intel_wakeref_t wakeref;
1077 wakeref = intel_display_power_get_if_enabled(dev_priv,
1078 POWER_DOMAIN_DISPLAY_CORE);
1084 /* DPLL0 is always enabled since it drives CDCLK */
1085 val = I915_READ(regs[id].ctl);
1086 if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1089 val = I915_READ(DPLL_CTRL1);
1090 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1095 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Running best-candidate state for the SKL WRPLL divider search. */
1100 struct skl_wrpll_context {
1101 u64 min_deviation; /* current minimal deviation */
1102 u64 central_freq; /* chosen central freq */
1103 u64 dco_freq; /* chosen dco freq */
1104 unsigned int p; /* chosen divider */
/* Reset the search context; min_deviation starts at "worse than anything". */
1107 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1109 memset(ctx, 0, sizeof(*ctx));
1111 ctx->min_deviation = U64_MAX;
1114 /* DCO freq must be within +1%/-6% of the DCO central freq */
1115 #define SKL_DCO_MAX_PDEVIATION 100
1116 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one divider candidate: compute the DCO deviation from the
 * central frequency (in 0.01% units) and accept it if it is within the
 * asymmetric +1%/-6% window and better than the current best.
 */
1118 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1121 unsigned int divider)
1125 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1128 /* positive deviation */
1129 if (dco_freq >= central_freq) {
1130 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1131 deviation < ctx->min_deviation) {
1132 ctx->min_deviation = deviation;
1133 ctx->central_freq = central_freq;
1134 ctx->dco_freq = dco_freq;
1137 /* negative deviation */
1138 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1139 deviation < ctx->min_deviation) {
1140 ctx->min_deviation = deviation;
1141 ctx->central_freq = central_freq;
1142 ctx->dco_freq = dco_freq;
/*
 * Factor the overall divider @p into the three hardware multipliers
 * P0 (PDiv), P1 (QDiv) and P2 (KDiv).  Even dividers are decomposed via
 * half = p/2; odd dividers are handled per-value (3, 5, 7, 9, 15, 21, 35).
 */
1147 static void skl_wrpll_get_multipliers(unsigned int p,
1148 unsigned int *p0 /* out */,
1149 unsigned int *p1 /* out */,
1150 unsigned int *p2 /* out */)
1154 unsigned int half = p / 2;
1156 if (half == 1 || half == 2 || half == 3 || half == 5) {
1160 } else if (half % 2 == 0) {
1164 } else if (half % 3 == 0) {
1168 } else if (half % 7 == 0) {
1173 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1177 } else if (p == 5 || p == 7) {
1181 } else if (p == 15) {
1185 } else if (p == 21) {
1189 } else if (p == 35) {
1196 struct skl_wrpll_params {
/*
 * Encode the chosen central frequency, p0/p1/p2 multipliers and DCO
 * frequency into the register-field layout of skl_wrpll_params.  Unknown
 * central-frequency / PDiv / KDiv inputs trigger a WARN.
 */
1206 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1209 u32 p0, u32 p1, u32 p2)
1213 switch (central_freq) {
1215 params->central_freq = 0;
1218 params->central_freq = 1;
1221 params->central_freq = 3;
1238 WARN(1, "Incorrect PDiv\n");
1255 WARN(1, "Incorrect KDiv\n");
/* QDiv mode is only enabled when the ratio is not 1. */
1258 params->qdiv_ratio = p1;
1259 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1261 dco_freq = p0 * p1 * p2 * afe_clock;
1264 * Intermediate values are in Hz.
1265 * Divide by MHz to match bsepc
/* Split DCO frequency into a 24 MHz-referenced integer and 15-bit fraction. */
1267 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1268 params->dco_fraction =
1269 div_u64((div_u64(dco_freq, 24) -
1270 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * SKL WRPLL divider search: try every even divider against each of the
 * three DCO central frequencies, then the odd dividers, keeping the best
 * candidate via skl_wrpll_try_divider().  Even dividers win ties (the
 * d == 0 early-accept); a perfect (zero-deviation) match short-circuits
 * the search.  The winning triple is then split into p0/p1/p2 and packed
 * into @wrpll_params.
 */
1274 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1275 struct skl_wrpll_params *wrpll_params)
1277 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1278 u64 dco_central_freq[3] = { 8400000000ULL,
1281 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1282 24, 28, 30, 32, 36, 40, 42, 44,
1283 48, 52, 54, 56, 60, 64, 66, 68,
1284 70, 72, 76, 78, 80, 84, 88, 90,
1286 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1287 static const struct {
1291 { even_dividers, ARRAY_SIZE(even_dividers) },
1292 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1294 struct skl_wrpll_context ctx;
1295 unsigned int dco, d, i;
1296 unsigned int p0, p1, p2;
1298 skl_wrpll_context_init(&ctx);
1300 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1301 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1302 for (i = 0; i < dividers[d].n_dividers; i++) {
1303 unsigned int p = dividers[d].list[i];
1304 u64 dco_freq = p * afe_clock;
1306 skl_wrpll_try_divider(&ctx,
1307 dco_central_freq[dco],
1311 * Skip the remaining dividers if we're sure to
1312 * have found the definitive divider, we can't
1313 * improve a 0 deviation.
1315 if (ctx.min_deviation == 0)
1316 goto skip_remaining_dividers;
1320 skip_remaining_dividers:
1322 * If a solution is found with an even divider, prefer
1325 if (d == 0 && ctx.p)
1330 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1335 * gcc incorrectly analyses that these can be used without being
1336 * initialized. To be fair, it's hard to guess.
1339 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1340 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/*
 * HDMI path for SKL: compute WRPLL parameters for the port clock and
 * encode them into ctrl1/cfgcr1/cfgcr2 in crtc_state->dpll_hw_state.
 * DPLL id 0 is used throughout as a placeholder (see the referenced
 * comment in intel_dpll_hw_state).  Returns false if no divider fits.
 */
1346 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1348 u32 ctrl1, cfgcr1, cfgcr2;
1349 struct skl_wrpll_params wrpll_params = { 0, };
1352 * See comment in intel_dpll_hw_state to understand why we always use 0
1353 * as the DPLL id in this function.
1355 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1357 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1359 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1363 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1364 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1365 wrpll_params.dco_integer;
1367 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1368 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1369 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1370 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1371 wrpll_params.central_freq;
/* Zero the whole hw_state so the memcmp-based sharing check stays meaningful. */
1373 memset(&crtc_state->dpll_hw_state, 0,
1374 sizeof(crtc_state->dpll_hw_state));
1376 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1377 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1378 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/*
 * DP path for SKL: translate the link rate (port_clock / 2, i.e. the
 * symbol clock in kHz) into a DPLL_CTRL1 link-rate field and store it in
 * crtc_state->dpll_hw_state.  DPLL id 0 is a placeholder, as above.
 */
1383 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1388 * See comment in intel_dpll_hw_state to understand why we always use 0
1389 * as the DPLL id in this function.
1391 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1392 switch (crtc_state->port_clock / 2) {
1394 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1397 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1400 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1404 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1407 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1410 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1414 memset(&crtc_state->dpll_hw_state, 0,
1415 sizeof(crtc_state->dpll_hw_state));
1417 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * SKL PLL reservation: compute the hw_state for HDMI or DP, then search
 * the pool.  eDP is restricted to one range (presumably DPLL0 — range
 * arguments elided in this excerpt), everything else to the remaining
 * DPLLs.  On success the PLL is referenced and stored in crtc_state.
 */
1422 static bool skl_get_dpll(struct intel_atomic_state *state,
1423 struct intel_crtc *crtc,
1424 struct intel_encoder *encoder)
1426 struct intel_crtc_state *crtc_state =
1427 intel_atomic_get_new_crtc_state(state, crtc);
1428 struct intel_shared_dpll *pll;
1431 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1432 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1434 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1437 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1438 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1440 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1447 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1448 pll = intel_find_shared_dpll(state, crtc,
1449 &crtc_state->dpll_hw_state,
1453 pll = intel_find_shared_dpll(state, crtc,
1454 &crtc_state->dpll_hw_state,
1460 intel_reference_shared_dpll(state, crtc,
1461 pll, &crtc_state->dpll_hw_state);
1463 crtc_state->shared_dpll = pll;
/* Debug dump of the SKL dpll_hw_state fields (ctrl1/cfgcr1/cfgcr2). */
1468 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1469 const struct intel_dpll_hw_state *hw_state)
1471 DRM_DEBUG_KMS("dpll_hw_state: "
1472 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Vtable for SKL DPLL1-3. */
1478 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1479 .enable = skl_ddi_pll_enable,
1480 .disable = skl_ddi_pll_disable,
1481 .get_hw_state = skl_ddi_pll_get_hw_state,
/* Vtable for DPLL0, which drives CDCLK and is never truly disabled. */
1484 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1485 .enable = skl_ddi_dpll0_enable,
1486 .disable = skl_ddi_dpll0_disable,
1487 .get_hw_state = skl_ddi_dpll0_get_hw_state,
/*
 * Program and enable a Broxton/Geminilake port PLL: select the non-SSC
 * reference, (GLK only) power up the PLL, write dividers and loop-filter
 * coefficients from pll->state.hw_state, trigger recalibration, then enable
 * the PLL and wait for lock. Finally programs per-lane staggering via the
 * PCS group register. PLL id maps 1:1 to the DDI port.
 * NOTE(review): extraction dropped some lines; code kept byte-identical.
 */
1490 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1491				struct intel_shared_dpll *pll)
1494	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1496	enum dpio_channel ch;
1498	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1500	/* Non-SSC reference */
1501	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1502	temp |= PORT_PLL_REF_SEL;
1503	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* GLK adds an explicit PLL power well that must be up before programming */
1505	if (IS_GEMINILAKE(dev_priv)) {
1506		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1507		temp |= PORT_PLL_POWER_ENABLE;
1508		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1510		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1511				 PORT_PLL_POWER_STATE), 200))
1512			DRM_ERROR("Power state not set for PLL:%d\n", port);
1515	/* Disable 10 bit clock */
1516	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1517	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1518	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1/P2 dividers */
1521	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1522	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1523	temp |= pll->state.hw_state.ebb0;
1524	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1526	/* Write M2 integer */
1527	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1528	temp &= ~PORT_PLL_M2_MASK;
1529	temp |= pll->state.hw_state.pll0;
1530	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N divider */
1533	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1534	temp &= ~PORT_PLL_N_MASK;
1535	temp |= pll->state.hw_state.pll1;
1536	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1538	/* Write M2 fraction */
1539	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1540	temp &= ~PORT_PLL_M2_FRAC_MASK;
1541	temp |= pll->state.hw_state.pll2;
1542	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1544	/* Write M2 fraction enable */
1545	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1546	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1547	temp |= pll->state.hw_state.pll3;
1548	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write loop-filter coefficients (prop/int/gain) */
1551	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1552	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1553	temp &= ~PORT_PLL_INT_COEFF_MASK;
1554	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1555	temp |= pll->state.hw_state.pll6;
1556	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1558	/* Write calibration val */
1559	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1560	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1561	temp |= pll->state.hw_state.pll8;
1562	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1564	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1565	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1566	temp |= pll->state.hw_state.pll9;
1567	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1569	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1570	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1571	temp &= ~PORT_PLL_DCO_AMP_MASK;
1572	temp |= pll->state.hw_state.pll10;
1573	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1575	/* Recalibrate with new settings */
1576	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1577	temp |= PORT_PLL_RECALIBRATE;
1578	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1579	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1580	temp |= pll->state.hw_state.ebb4;
1581	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait for lock */
1584	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1585	temp |= PORT_PLL_ENABLE;
1586	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1587	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1589	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1591		DRM_ERROR("PLL %d not locked\n", port);
/* GLK workaround: widen the DCC delay range on TX lane group */
1593	if (IS_GEMINILAKE(dev_priv)) {
1594		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1595		temp |= DCC_DELAY_RANGE_2;
1596		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1600	 * While we write to the group register to program all lanes at once we
1601	 * can read only lane registers and we pick lanes 0/1 for that.
1603	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1604	temp &= ~LANE_STAGGER_MASK;
1605	temp &= ~LANESTAGGER_STRAP_OVRD;
1606	temp |= pll->state.hw_state.pcsdw12;
1607	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a Broxton/Geminilake port PLL: clear the enable bit, and on GLK
 * additionally power the PLL down and wait for the power state to clear.
 */
1610 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1611					struct intel_shared_dpll *pll)
1613	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1616	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1617	temp &= ~PORT_PLL_ENABLE;
1618	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1619	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
/* GLK: mirror of the explicit power-up done in bxt_ddi_pll_enable() */
1621	if (IS_GEMINILAKE(dev_priv)) {
1622		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1623		temp &= ~PORT_PLL_POWER_ENABLE;
1624		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1626		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1627				  PORT_PLL_POWER_STATE), 200))
1628			DRM_ERROR("Power state not reset for PLL:%d\n", port);
/*
 * Read back the current HW state of a BXT port PLL into *hw_state. Takes a
 * display-power wakeref (bailing out if the power well is down), checks the
 * PLL enable bit, then reads each divider/coefficient register and masks it
 * down to the bits this driver programs, so readout compares cleanly against
 * the software state. Returns whether the PLL is enabled.
 * NOTE(review): extraction dropped some lines (declarations, early returns,
 * final power-put/return); code kept byte-identical.
 */
1632 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1633					struct intel_shared_dpll *pll,
1634					struct intel_dpll_hw_state *hw_state)
1636	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1637	intel_wakeref_t wakeref;
1639	enum dpio_channel ch;
1643	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1645	wakeref = intel_display_power_get_if_enabled(dev_priv,
1646						     POWER_DOMAIN_DISPLAY_CORE);
1652	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1653	if (!(val & PORT_PLL_ENABLE))
1656	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1657	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1659	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1660	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1662	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1663	hw_state->pll0 &= PORT_PLL_M2_MASK;
1665	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1666	hw_state->pll1 &= PORT_PLL_N_MASK;
1668	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1669	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1671	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1672	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1674	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1675	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1676			  PORT_PLL_INT_COEFF_MASK |
1677			  PORT_PLL_GAIN_CTL_MASK;
1679	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1680	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1682	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1683	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1685	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1686	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1687			   PORT_PLL_DCO_AMP_MASK;
1690	 * While we write to the group register to program all lanes at once we
1691	 * can read only lane registers. We configure all lanes the same way, so
1692	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1694	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1695	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1696		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1698				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1699	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1704	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1709 /* bxt clock parameters */
/*
 * Divider set for one BXT PLL frequency: clock (kHz), p1, p2, m2 integer,
 * m2 fraction, m2-fraction enable, n — field names hidden by the extraction;
 * TODO confirm against the full struct definition.
 */
1710 struct bxt_clk_div {
1722 /* pre-calculated values for DP linkrates */
1723 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1724	{162000, 4, 2, 32, 1677722, 1, 1},
1725	{270000, 4, 1, 27,       0, 0, 1},
1726	{540000, 2, 1, 27,       0, 0, 1},
1727	{216000, 3, 2, 32, 1677722, 1, 1},
1728	{243000, 4, 1, 24, 1258291, 1, 1},
1729	{324000, 4, 1, 32, 1677722, 1, 1},
1730	{432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Compute BXT PLL dividers for an HDMI clock by running the generic
 * best-dpll search, then decompose the result into the bxt_clk_div fields
 * (m2 is split into 10.22 fixed-point integer/fraction parts).
 * NOTE(review): extraction dropped the return paths; code kept byte-identical.
 */
1734 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1735			  struct bxt_clk_div *clk_div)
1737	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1738	struct dpll best_clock;
1740	/* Calculate HDMI div */
1742	 * FIXME: tie the following calculation into
1743	 * i9xx_crtc_compute_clock
1745	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1746		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1747				 crtc_state->port_clock,
1748				 pipe_name(crtc->pipe));
1752	clk_div->p1 = best_clock.p1;
1753	clk_div->p2 = best_clock.p2;
/* BXT PLL hardware fixes m1 at 2; anything else indicates a search bug */
1754	WARN_ON(best_clock.m1 != 2);
1755	clk_div->n = best_clock.n;
/* m2 is 10.22 fixed point: top bits integer, low 22 bits fraction */
1756	clk_div->m2_int = best_clock.m2 >> 22;
1757	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1758	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1760	clk_div->vco = best_clock.vco;
/*
 * Pick the precomputed divider set matching the DP port clock from
 * bxt_dp_clk_val (falling back to entry 0 if no exact match) and derive
 * the resulting VCO frequency.
 */
1765 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1766				    struct bxt_clk_div *clk_div)
1768	int clock = crtc_state->port_clock;
/* Default to the first table entry in case no exact clock match is found */
1771	*clk_div = bxt_dp_clk_val[0];
1772	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1773		if (bxt_dp_clk_val[i].clock == clock) {
1774			*clk_div = bxt_dp_clk_val[i];
/* vco = clock * 5 * p1 * p2 (the 10/2 keeps intermediate math in range) */
1779	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate a bxt_clk_div divider set into the register-level BXT DPLL HW
 * state: select loop-filter coefficients and target count by VCO band,
 * pick lane staggering by pixel clock, then fill every dpll_hw_state field.
 * NOTE(review): extraction dropped the coefficient assignments inside the
 * VCO-band branches and the lanestagger values; code kept byte-identical.
 */
1782 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1783				      const struct bxt_clk_div *clk_div)
1785	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1786	int clock = crtc_state->port_clock;
1787	int vco = clk_div->vco;
1788	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1791	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
/* Loop-filter tuning depends on which VCO frequency band we land in */
1793	if (vco >= 6200000 && vco <= 6700000) {
1798	} else if ((vco > 5400000 && vco < 6200000) ||
1799			(vco >= 4800000 && vco < 5400000)) {
1804	} else if (vco == 5400000) {
1810		DRM_ERROR("Invalid VCO\n");
/* Lane staggering is chosen from the pixel clock range */
1816	else if (clock > 135000)
1818	else if (clock > 67000)
1820	else if (clock > 33000)
1825	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1826	dpll_hw_state->pll0 = clk_div->m2_int;
1827	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1828	dpll_hw_state->pll2 = clk_div->m2_frac;
1830	if (clk_div->m2_frac_en)
1831		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1833	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1834	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1836	dpll_hw_state->pll8 = targ_cnt;
1838	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1840	dpll_hw_state->pll10 =
1841		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1842		| PORT_PLL_DCO_AMP_OVR_EN_H;
1844	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1846	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/*
 * Thin wrappers: compute the divider set for DP (table lookup) or HDMI
 * (best-dpll search), then delegate to bxt_ddi_set_dpll_hw_state() to fill
 * crtc_state->dpll_hw_state.
 */
1852 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1854	struct bxt_clk_div clk_div = {};
1856	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1858	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1862 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1864	struct bxt_clk_div clk_div = {};
1866	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1868	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Reserve the BXT PLL for this CRTC/encoder. Computes the desired HW state
 * for HDMI or DP, then — since BXT maps ports to PLLs 1:1 — looks the PLL
 * up directly by the encoder's port instead of searching a pool.
 */
1871 static bool bxt_get_dpll(struct intel_atomic_state *state,
1872			 struct intel_crtc *crtc,
1873			 struct intel_encoder *encoder)
1875	struct intel_crtc_state *crtc_state =
1876		intel_atomic_get_new_crtc_state(state, crtc);
1877	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1878	struct intel_shared_dpll *pll;
1879	enum intel_dpll_id id;
1881	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1882	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1885	if (intel_crtc_has_dp_encoder(crtc_state) &&
1886	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1889	/* 1:1 mapping between ports and PLLs */
1890	id = (enum intel_dpll_id) encoder->port;
1891	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1893	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1894		      crtc->base.base.id, crtc->base.name, pll->info->name);
1896	intel_reference_shared_dpll(state, crtc,
1897				    pll, &crtc_state->dpll_hw_state);
1899	crtc_state->shared_dpll = pll;
/*
 * Debug dump of every BXT DPLL HW state field, plus the BXT PLL vtable
 * wiring enable/disable/readout to the bxt_ddi_pll_* implementations above.
 */
1904 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1905			      const struct intel_dpll_hw_state *hw_state)
1907	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1908		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1909		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1923 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1924	.enable = bxt_ddi_pll_enable,
1925	.disable = bxt_ddi_pll_disable,
1926	.get_hw_state = bxt_ddi_pll_get_hw_state,
/*
 * Per-platform DPLL manager: the table of available PLLs plus the hooks the
 * atomic-modeset code calls to reserve (get), release (put), switch the
 * active PLL, and debug-dump HW state.
 */
1929 struct intel_dpll_mgr {
1930	const struct dpll_info *dpll_info;
1932	bool (*get_dplls)(struct intel_atomic_state *state,
1933			  struct intel_crtc *crtc,
1934			  struct intel_encoder *encoder);
1935	void (*put_dplls)(struct intel_atomic_state *state,
1936			  struct intel_crtc *crtc);
1937	void (*update_active_dpll)(struct intel_atomic_state *state,
1938				   struct intel_crtc *crtc,
1939				   struct intel_encoder *encoder);
1940	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1941			      const struct intel_dpll_hw_state *hw_state);
/*
 * Per-platform PLL tables and their managers: PCH (IBX), Haswell, Skylake
 * and Broxton. Entries marked INTEL_DPLL_ALWAYS_ON are never disabled by
 * the manager (HSW LCPLLs; SKL DPLL0, which also drives CDCLK).
 */
1944 static const struct dpll_info pch_plls[] = {
1945	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1946	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1950 static const struct intel_dpll_mgr pch_pll_mgr = {
1951	.dpll_info = pch_plls,
1952	.get_dplls = ibx_get_dpll,
1953	.put_dplls = intel_put_dpll,
1954	.dump_hw_state = ibx_dump_hw_state,
1957 static const struct dpll_info hsw_plls[] = {
1958	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1959	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1960	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1961	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1962	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1963	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1967 static const struct intel_dpll_mgr hsw_pll_mgr = {
1968	.dpll_info = hsw_plls,
1969	.get_dplls = hsw_get_dpll,
1970	.put_dplls = intel_put_dpll,
1971	.dump_hw_state = hsw_dump_hw_state,
1974 static const struct dpll_info skl_plls[] = {
1975	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1976	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1977	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1978	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1982 static const struct intel_dpll_mgr skl_pll_mgr = {
1983	.dpll_info = skl_plls,
1984	.get_dplls = skl_get_dpll,
1985	.put_dplls = intel_put_dpll,
1986	.dump_hw_state = skl_dump_hw_state,
1989 static const struct dpll_info bxt_plls[] = {
1990	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1991	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1992	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1996 static const struct intel_dpll_mgr bxt_pll_mgr = {
1997	.dpll_info = bxt_plls,
1998	.get_dplls = bxt_get_dpll,
1999	.put_dplls = intel_put_dpll,
2000	.dump_hw_state = bxt_dump_hw_state,
/*
 * Enable a Cannon Lake DPLL following the bspec sequence: power up, wait for
 * power state, program CFGCR0 (and CFGCR1 only in HDMI mode), enable, and
 * wait for lock. DVFS steps are handled by the cdclk code, and DDI clock
 * mapping happens in intel_ddi_clk_select.
 */
2003 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2004			       struct intel_shared_dpll *pll)
2006	const enum intel_dpll_id id = pll->info->id;
2009	/* 1. Enable DPLL power in DPLL_ENABLE. */
2010	val = I915_READ(CNL_DPLL_ENABLE(id));
2011	val |= PLL_POWER_ENABLE;
2012	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2014	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2015	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2016				  PLL_POWER_STATE, 5))
2017		DRM_ERROR("PLL %d Power not enabled\n", id);
2020	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2021	 * select DP mode, and set DP link rate.
2023	val = pll->state.hw_state.cfgcr0;
2024	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2026	/* 4. Read back to ensure writes completed */
2027	POSTING_READ(CNL_DPLL_CFGCR0(id));
2029	/* 3. Configure DPLL_CFGCR0 */
2030	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
2031	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2032		val = pll->state.hw_state.cfgcr1;
2033		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2034		/* 4. Read back to ensure writes completed */
2035		POSTING_READ(CNL_DPLL_CFGCR1(id));
2039	 * 5. If the frequency will result in a change to the voltage
2040	 * requirement, follow the Display Voltage Frequency Switching
2041	 * Sequence Before Frequency Change
2043	 * Note: DVFS is actually handled via the cdclk code paths,
2044	 * hence we do nothing here.
2047	/* 6. Enable DPLL in DPLL_ENABLE. */
2048	val = I915_READ(CNL_DPLL_ENABLE(id));
2050	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2052	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2053	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2054		DRM_ERROR("PLL %d not locked\n", id);
2057	 * 8. If the frequency will result in a change to the voltage
2058	 * requirement, follow the Display Voltage Frequency Switching
2059	 * Sequence After Frequency Change
2061	 * Note: DVFS is actually handled via the cdclk code paths,
2062	 * hence we do nothing here.
2066	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2067	 * Done at intel_ddi_clk_select
/*
 * Disable a Cannon Lake DPLL: the inverse bspec sequence of
 * cnl_ddi_pll_enable — clear the PLL enable bit, wait for unlock, then
 * drop PLL power and wait for the power state to clear. DDI clock gating
 * and DVFS are handled elsewhere (intel_ddi_post_disable / cdclk code).
 */
2071 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2072				struct intel_shared_dpll *pll)
2074	const enum intel_dpll_id id = pll->info->id;
2078	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2079	 * Done at intel_ddi_post_disable
2083	 * 2. If the frequency will result in a change to the voltage
2084	 * requirement, follow the Display Voltage Frequency Switching
2085	 * Sequence Before Frequency Change
2087	 * Note: DVFS is actually handled via the cdclk code paths,
2088	 * hence we do nothing here.
2091	/* 3. Disable DPLL through DPLL_ENABLE. */
2092	val = I915_READ(CNL_DPLL_ENABLE(id));
2094	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2096	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2097	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2098		DRM_ERROR("PLL %d locked\n", id);
2101	 * 5. If the frequency will result in a change to the voltage
2102	 * requirement, follow the Display Voltage Frequency Switching
2103	 * Sequence After Frequency Change
2105	 * Note: DVFS is actually handled via the cdclk code paths,
2106	 * hence we do nothing here.
2109	/* 6. Disable DPLL power in DPLL_ENABLE. */
2110	val = I915_READ(CNL_DPLL_ENABLE(id));
2111	val &= ~PLL_POWER_ENABLE;
2112	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2114	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2115	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2116				    PLL_POWER_STATE, 5))
2117		DRM_ERROR("PLL %d Power not disabled\n", id);
/*
 * Read back the CNL DPLL HW state. Grabs a display-power wakeref (bailing
 * if the power well is down), checks the enable bit, reads CFGCR0, and reads
 * CFGCR1 only when HDMI mode is set to avoid stale values. Returns whether
 * the PLL is enabled.
 * NOTE(review): extraction dropped the early-return/ret handling lines;
 * code kept byte-identical.
 */
2120 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2121				     struct intel_shared_dpll *pll,
2122				     struct intel_dpll_hw_state *hw_state)
2124	const enum intel_dpll_id id = pll->info->id;
2125	intel_wakeref_t wakeref;
2129	wakeref = intel_display_power_get_if_enabled(dev_priv,
2130						     POWER_DOMAIN_DISPLAY_CORE);
2136	val = I915_READ(CNL_DPLL_ENABLE(id));
2137	if (!(val & PLL_ENABLE))
2140	val = I915_READ(CNL_DPLL_CFGCR0(id));
2141	hw_state->cfgcr0 = val;
2143	/* avoid reading back stale values if HDMI mode is not enabled */
2144	if (val & DPLL_CFGCR0_HDMI_MODE) {
2145		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2150	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Factor the chosen overall divider into the CNL WRPLL P/Q/K multipliers.
 * Even dividers prefer the smallest P factor (2, then 4, 6, 10, 14) with the
 * remainder in Q; odd dividers (3, 5, 7, 9, 15, 21) use Q=1 with the whole
 * factor in P (and K per branch).
 * NOTE(review): extraction dropped the *pdiv/*kdiv assignments inside each
 * branch; code kept byte-identical.
 */
2155 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2156				      int *qdiv, int *kdiv)
2159	if (bestdiv % 2 == 0) {
2164		} else if (bestdiv % 4 == 0) {
2166			*qdiv = bestdiv / 4;
2168		} else if (bestdiv % 6 == 0) {
2170			*qdiv = bestdiv / 6;
2172		} else if (bestdiv % 5 == 0) {
2174			*qdiv = bestdiv / 10;
2176		} else if (bestdiv % 14 == 0) {
2178			*qdiv = bestdiv / 14;
2182		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2186		} else { /* 9, 15, 21 */
2187			*pdiv = bestdiv / 3;
/*
 * Encode P/Q/K divider values and the DCO frequency into the register-level
 * skl_wrpll_params. The DCO ratio is computed as dco_freq/ref_freq in 17.15
 * fixed point, split into integer and 15-bit fraction fields.
 * NOTE(review): extraction dropped the kdiv/pdiv switch cases; code kept
 * byte-identical.
 */
2194 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2195				      u32 dco_freq, u32 ref_freq,
2196				      int pdiv, int qdiv, int kdiv)
2211		WARN(1, "Incorrect KDiv\n");
2228		WARN(1, "Incorrect PDiv\n");
/* Q divider is only meaningful when K = 2 */
2231	WARN_ON(kdiv != 2 && qdiv != 1);
2233	params->qdiv_ratio = qdiv;
2234	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
/* dco_freq / ref_freq in 17.15 fixed point */
2236	dco = div_u64((u64)dco_freq << 15, ref_freq);
2238	params->dco_integer = dco >> 15;
2239	params->dco_fraction = dco & 0x7fff;
/*
 * Return the reference clock (kHz) to use for HDMI WRPLL calculations.
 * On gen11+ a 38.4 MHz reference is treated as 19.2 MHz because the DPLL
 * divides it by 2 automatically.
 * NOTE(review): the adjusted-value assignment and return line were dropped
 * by the extraction; code kept byte-identical.
 */
2242 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2244	int ref_clock = dev_priv->cdclk.hw.ref;
2247	 * For ICL+, the spec states: if reference frequency is 38.4,
2248	 * use 19.2 because the DPLL automatically divides that by 2.
2250	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
/*
 * Find WRPLL parameters for an HDMI clock: try each legal divider, keep the
 * one whose DCO (afe_clock * divider) lands inside [dco_min, dco_max] and
 * closest to the band midpoint, then split the winning divider into P/Q/K
 * multipliers and encode everything into *wrpll_params.
 * NOTE(review): extraction dropped best_dco tracking, the no-divider-found
 * bailout and the return; code kept byte-identical.
 */
2257 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2258			struct skl_wrpll_params *wrpll_params)
2260	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/* AFE clock is 5x the port (symbol) clock */
2261	u32 afe_clock = crtc_state->port_clock * 5;
2263	u32 dco_min = 7998000;
2264	u32 dco_max = 10000000;
2265	u32 dco_mid = (dco_min + dco_max) / 2;
2266	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2267					 18, 20, 24, 28, 30, 32,  36,  40,
2268					 42, 44, 48, 50, 52, 54,  56,  60,
2269					 64, 66, 68, 70, 72, 76,  78,  80,
2270					 84, 88, 90, 92, 96, 98, 100, 102,
2271					  3,  5,  7,  9, 15, 21 };
2272	u32 dco, best_dco = 0, dco_centrality = 0;
2273	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2274	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2276	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2277		dco = afe_clock * dividers[d];
2279		if ((dco <= dco_max) && (dco >= dco_min)) {
2280			dco_centrality = abs(dco - dco_mid);
2282			if (dco_centrality < best_dco_centrality) {
2283				best_dco_centrality = dco_centrality;
2284				best_div = dividers[d];
2293	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2295	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2297	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Compute the CNL HDMI DPLL HW state: run the WRPLL calculation, then pack
 * the result into CFGCR0 (HDMI mode + DCO) and CFGCR1 (Q/K/P dividers +
 * central frequency) in crtc_state->dpll_hw_state.
 */
2303 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2306	struct skl_wrpll_params wrpll_params = { 0, };
2308	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2310	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2313	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2314		  wrpll_params.dco_integer;
2316	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2317		 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2318		 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2319		 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2320		 DPLL_CFGCR1_CENTRAL_FREQ;
/* Wipe any stale state before storing the freshly computed values */
2322	memset(&crtc_state->dpll_hw_state, 0,
2323	       sizeof(crtc_state->dpll_hw_state));
2325	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2326	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/*
 * Compute the CNL DP DPLL HW state: enable SSC and select the CFGCR0 link
 * rate matching the port clock (port_clock/2 gives the per-lane link rate).
 * NOTE(review): case labels and the return were dropped by the extraction;
 * code kept byte-identical.
 */
2331 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2335	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2337	switch (crtc_state->port_clock / 2) {
2339		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2342		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2345		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2349		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2352		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2355		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2358		/* Some SKUs may require elevated I/O voltage to support this */
2359		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2362		/* Some SKUs may require elevated I/O voltage to support this */
2363		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2367	memset(&crtc_state->dpll_hw_state, 0,
2368	       sizeof(crtc_state->dpll_hw_state));
2370	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
/*
 * Reserve a CNL DPLL for the CRTC: compute the HDMI or DP HW state, then
 * search the shared-PLL pool for a match and reference it. Logs and fails
 * for unsupported output types or when no PLL is available.
 */
2375 static bool cnl_get_dpll(struct intel_atomic_state *state,
2376			 struct intel_crtc *crtc,
2377			 struct intel_encoder *encoder)
2379	struct intel_crtc_state *crtc_state =
2380		intel_atomic_get_new_crtc_state(state, crtc);
2381	struct intel_shared_dpll *pll;
2384	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2385		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2387			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2390	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2391		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2393			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2397		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2398			      crtc_state->output_types);
2402	pll = intel_find_shared_dpll(state, crtc,
2403				     &crtc_state->dpll_hw_state,
2407		DRM_DEBUG_KMS("No PLL selected\n");
2411	intel_reference_shared_dpll(state, crtc,
2412				    pll, &crtc_state->dpll_hw_state);
2414	crtc_state->shared_dpll = pll;
/*
 * CNL debug dump (cfgcr0/cfgcr1), the CNL PLL vtable, and the CNL PLL
 * table plus its manager wiring get/put/dump to the functions above.
 */
2419 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2420			      const struct intel_dpll_hw_state *hw_state)
2422	DRM_DEBUG_KMS("dpll_hw_state: "
2423		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2428 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2429	.enable = cnl_ddi_pll_enable,
2430	.disable = cnl_ddi_pll_disable,
2431	.get_hw_state = cnl_ddi_pll_get_hw_state,
2434 static const struct dpll_info cnl_plls[] = {
2435	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2436	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2437	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2441 static const struct intel_dpll_mgr cnl_pll_mgr = {
2442	.dpll_info = cnl_plls,
2443	.get_dplls = cnl_get_dpll,
2444	.put_dplls = intel_put_dpll,
2445	.dump_hw_state = cnl_dump_hw_state,
/*
 * Precomputed Icelake combo-PHY PLL parameters for standard DP link rates,
 * one table per reference clock (24 MHz, and 19.2 MHz which also covers
 * 38.4 MHz), plus the fixed Thunderbolt PLL parameters for each reference.
 */
2448 struct icl_combo_pll_params {
2450	struct skl_wrpll_params wrpll;
2454 * These values are already adjusted: they're the bits we write to the
2455 * registers, not the logical values.
2457 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2459	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2460	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2462	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2463	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2465	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2466	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2468	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2469	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2471	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2472	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2474	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2475	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2477	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2478	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2480	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2481	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 /* Also used for 38.4 MHz values. */
2486 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2488	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2489	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2492	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2495	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2497	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2498	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2501	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2503	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2504	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2507	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2509	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2510	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2513 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2514	.dco_integer = 0x151, .dco_fraction = 0x4000,
2515	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2518 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2519	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2520	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/*
 * Look up the precomputed combo-PLL parameters for the DP port clock,
 * selecting the 24 MHz or 19.2/38.4 MHz table based on the reference clock.
 * Both tables have the same length, so iterating with the 24 MHz table's
 * ARRAY_SIZE is safe for either. Logs a MISSING_CASE for unknown clocks.
 */
2523 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2524				  struct skl_wrpll_params *pll_params)
2526	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2527	const struct icl_combo_pll_params *params =
2528		dev_priv->cdclk.hw.ref == 24000 ?
2529		icl_dp_combo_pll_24MHz_values :
2530		icl_dp_combo_pll_19_2MHz_values;
2531	int clock = crtc_state->port_clock;
2534	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2535		if (clock == params[i].clock) {
2536			*pll_params = params[i].wrpll;
2541	MISSING_CASE(clock);
/*
 * Fill *pll_params with the fixed Thunderbolt PLL parameters for the
 * current reference clock (24 MHz vs 19.2/38.4 MHz).
 */
2545 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2546			     struct skl_wrpll_params *pll_params)
2548	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2550	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2551			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
/*
 * Compute the ICL/TGL combo or TBT DPLL state for the CRTC: TBT parameters
 * for Type-C ports, WRPLL calculation for HDMI/DSI, DP table lookup
 * otherwise. Packs the result into cfgcr0/cfgcr1 with the gen-specific
 * central-frequency/clock-select field.
 */
2555 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2556				struct intel_encoder *encoder,
2557				struct intel_dpll_hw_state *pll_state)
2559	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2561	struct skl_wrpll_params pll_params = { 0 };
2564	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2566		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2567	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2568		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2569		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2571		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2576	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2577		 pll_params.dco_integer;
2579	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2580		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2581		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2582		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
/* TGL replaces the central-frequency field with a clock-select override */
2584	if (INTEL_GEN(dev_priv) >= 12)
2585		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2587		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2589	memset(pll_state, 0, sizeof(*pll_state));
2591	pll_state->cfgcr0 = cfgcr0;
2592	pll_state->cfgcr1 = cfgcr1;
/*
 * Convert between ICL MG PLL ids and Type-C port numbers; the MG PLLs are
 * numbered contiguously starting at DPLL_ID_ICL_MGPLL1 for TC port 0.
 */
2598 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2600	return id - DPLL_ID_ICL_MGPLL1;
2603 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2605	return tc_port + DPLL_ID_ICL_MGPLL1;
/*
 * Search div1 x div2 divider combinations for the ICL MG (Type-C) PLL whose
 * resulting DCO (div1 * div2 * clock * 5) falls inside the allowed range
 * (DP is pinned to exactly 8.1 GHz). On success stores the DCO and fills
 * the refclkin/coreclk/hsclk control fields of *state.
 * NOTE(review): extraction dropped the tlinedrv assignments, the div1
 * switch cases and the success/failure returns; code kept byte-identical.
 */
2608 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2609				     u32 *target_dco_khz,
2610				     struct intel_dpll_hw_state *state)
2612	u32 dco_min_freq, dco_max_freq;
2613	int div1_vals[] = {7, 5, 3, 2};
2617	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2618	dco_max_freq = is_dp ? 8100000 : 10000000;
2620	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2621		int div1 = div1_vals[i];
2623		for (div2 = 10; div2 > 0; div2--) {
2624			int dco = div1 * div2 * clock_khz * 5;
2625			int a_divratio, tlinedrv, inputsel;
2628			if (dco < dco_min_freq || dco > dco_max_freq)
2632			a_divratio = is_dp ? 10 : 5;
2638			inputsel = is_dp ? 0 : 1;
2645				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2648				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2651				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2654				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2658			*target_dco_khz = dco;
2660			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2662			state->mg_clktop2_coreclkctl1 =
2663				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2665			state->mg_clktop2_hsclkctl =
2666				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2667				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2669				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2679 * The specification for this function uses real numbers, so the math had to be
2680 * adapted to integer-only calculation, that's why it looks so different.
/*
 * Compute the MG PHY (Type-C) PLL state for the requested port clock.
 * Fills @pll_state with the MG_* register values; returns false when no
 * suitable dividers or feedback values exist for the clock.
 *
 * NOTE(review): this extract is missing several original lines (braces,
 * the refclk switch cases setting iref_ndiv/iref_trim/iref_pulse_w, the
 * prop/int coefficient branches and the SSC else-branch) — code below is
 * kept byte-identical; comments only.
 */
2682 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2683 struct intel_dpll_hw_state *pll_state)
2685 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2686 int refclk_khz = dev_priv->cdclk.hw.ref;
2687 int clock = crtc_state->port_clock;
2688 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2689 u32 iref_ndiv, iref_trim, iref_pulse_w;
2690 u32 prop_coeff, int_coeff;
2691 u32 tdc_targetcnt, feedfwgain;
2692 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
/* SSC is currently always off; everything not HDMI is treated as DP. */
2694 bool use_ssc = false;
2695 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2697 memset(pll_state, 0, sizeof(*pll_state));
2699 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2701 DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
/*
 * Feedback dividers: try m1div = 2 first, fall back to a larger
 * pre-divider when the integer part would overflow 8 bits.
 */
2706 m2div_int = dco_khz / (refclk_khz * m1div);
2707 if (m2div_int > 255) {
2709 m2div_int = dco_khz / (refclk_khz * m1div);
2710 if (m2div_int > 255) {
2711 DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
/* 22-bit fractional part of the feedback divider. */
2716 m2div_rem = dco_khz % (refclk_khz * m1div);
2718 tmp = (u64)m2div_rem * (1 << 22);
2719 do_div(tmp, refclk_khz * m1div);
/* Reference-clock dependent iref parameters (cases stripped in extract). */
2722 switch (refclk_khz) {
2739 MISSING_CASE(refclk_khz);
2744 * tdc_res = 0.000003
2745 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2747 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2748 * was supposed to be a division, but we rearranged the operations of
2749 * the formula to avoid early divisions so we don't multiply the
2752 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2753 * we also rearrange to work with integers.
2755 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2756 * last division by 10.
2758 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2761 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2762 * 32 bits. That's not a problem since we round the division down
2765 feedfwgain = (use_ssc || m2div_rem > 0) ?
2766 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
/* Loop-filter coefficients depend on the DCO band. */
2768 if (dco_khz >= 9000000) {
/* SSC step size/length (only meaningful when use_ssc is set). */
2777 tmp = mul_u32_u32(dco_khz, 47 * 32);
2778 do_div(tmp, refclk_khz * m1div * 10000);
2781 tmp = mul_u32_u32(dco_khz, 1000);
2782 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
/* Assemble the MG_PLL_* register images from the values computed above. */
2789 pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2790 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2791 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2793 pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2794 MG_PLL_DIV1_DITHER_DIV_2 |
2795 MG_PLL_DIV1_NDIVRATIO(1) |
2796 MG_PLL_DIV1_FBPREDIV(m1div);
2798 pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2799 MG_PLL_LF_AFCCNTSEL_512 |
2800 MG_PLL_LF_GAINCTRL(1) |
2801 MG_PLL_LF_INT_COEFF(int_coeff) |
2802 MG_PLL_LF_PROP_COEFF(prop_coeff);
2804 pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2805 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2806 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2807 MG_PLL_FRAC_LOCK_DCODITHEREN |
2808 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2809 if (use_ssc || m2div_rem > 0)
2810 pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2812 pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2813 MG_PLL_SSC_TYPE(2) |
2814 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2815 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2817 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2819 pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2820 MG_PLL_TDC_COLDST_IREFINT_EN |
2821 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2822 MG_PLL_TDC_TDCOVCCORR_EN |
2823 MG_PLL_TDC_TDCSEL(3);
2825 pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2826 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2827 MG_PLL_BIAS_BIAS_BONUS(10) |
2828 MG_PLL_BIAS_BIASCAL_EN |
2829 MG_PLL_BIAS_CTRIM(12) |
2830 MG_PLL_BIAS_VREF_RDAC(4) |
2831 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz refclk most bias/coldst fields must be left at their
 * HW defaults; the masks record which bits we actually program so that
 * icl_mg_pll_write() can RMW and state readout can compare fairly.
 */
2833 if (refclk_khz == 38400) {
2834 pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2835 pll_state->mg_pll_bias_mask = 0;
2837 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2838 pll_state->mg_pll_bias_mask = -1U;
2841 pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2842 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2848 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2849 * @crtc_state: state for the CRTC to select the DPLL for
2850 * @port_dpll_id: the active @port_dpll_id to select
2852 * Select the given @port_dpll_id instance from the DPLLs reserved for the
2855 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2856 enum icl_port_dpll_id port_dpll_id)
2858 struct icl_port_dpll *port_dpll =
2859 &crtc_state->icl_port_dplls[port_dpll_id];
2861 crtc_state->shared_dpll = port_dpll->pll;
2862 crtc_state->dpll_hw_state = port_dpll->hw_state;
2865 static void icl_update_active_dpll(struct intel_atomic_state *state,
2866 struct intel_crtc *crtc,
2867 struct intel_encoder *encoder)
2869 struct intel_crtc_state *crtc_state =
2870 intel_atomic_get_new_crtc_state(state, crtc);
2871 struct intel_digital_port *primary_port;
2872 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2874 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2875 enc_to_mst(&encoder->base)->primary :
2876 enc_to_dig_port(&encoder->base);
2879 (primary_port->tc_mode == TC_PORT_DP_ALT ||
2880 primary_port->tc_mode == TC_PORT_LEGACY))
2881 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2883 icl_set_active_port_dpll(crtc_state, port_dpll_id);
2886 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2887 struct intel_crtc *crtc,
2888 struct intel_encoder *encoder)
2890 struct intel_crtc_state *crtc_state =
2891 intel_atomic_get_new_crtc_state(state, crtc);
2892 struct icl_port_dpll *port_dpll =
2893 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2894 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2895 enum port port = encoder->port;
2896 bool has_dpll4 = false;
2898 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2899 DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
2904 if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
2907 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2908 &port_dpll->hw_state,
2910 has_dpll4 ? DPLL_ID_EHL_DPLL4
2911 : DPLL_ID_ICL_DPLL1);
2912 if (!port_dpll->pll) {
2913 DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
2914 port_name(encoder->port));
2918 intel_reference_shared_dpll(state, crtc,
2919 port_dpll->pll, &port_dpll->hw_state);
2921 icl_update_active_dpll(state, crtc, encoder);
2926 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
2927 struct intel_crtc *crtc,
2928 struct intel_encoder *encoder)
2930 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2931 struct intel_crtc_state *crtc_state =
2932 intel_atomic_get_new_crtc_state(state, crtc);
2933 struct icl_port_dpll *port_dpll;
2934 enum intel_dpll_id dpll_id;
2936 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2937 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2938 DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
2942 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2943 &port_dpll->hw_state,
2945 DPLL_ID_ICL_TBTPLL);
2946 if (!port_dpll->pll) {
2947 DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
2950 intel_reference_shared_dpll(state, crtc,
2951 port_dpll->pll, &port_dpll->hw_state);
2954 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
2955 if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
2956 DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
2957 goto err_unreference_tbt_pll;
2960 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
2962 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2963 &port_dpll->hw_state,
2966 if (!port_dpll->pll) {
2967 DRM_DEBUG_KMS("No MG PHY PLL found\n");
2968 goto err_unreference_tbt_pll;
2970 intel_reference_shared_dpll(state, crtc,
2971 port_dpll->pll, &port_dpll->hw_state);
2973 icl_update_active_dpll(state, crtc, encoder);
2977 err_unreference_tbt_pll:
2978 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2979 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
2984 static bool icl_get_dplls(struct intel_atomic_state *state,
2985 struct intel_crtc *crtc,
2986 struct intel_encoder *encoder)
2988 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2989 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
2991 if (intel_phy_is_combo(dev_priv, phy))
2992 return icl_get_combo_phy_dpll(state, crtc, encoder);
2993 else if (intel_phy_is_tc(dev_priv, phy))
2994 return icl_get_tc_phy_dplls(state, crtc, encoder);
3001 static void icl_put_dplls(struct intel_atomic_state *state,
3002 struct intel_crtc *crtc)
3004 const struct intel_crtc_state *old_crtc_state =
3005 intel_atomic_get_old_crtc_state(state, crtc);
3006 struct intel_crtc_state *new_crtc_state =
3007 intel_atomic_get_new_crtc_state(state, crtc);
3008 enum icl_port_dpll_id id;
3010 new_crtc_state->shared_dpll = NULL;
3012 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3013 const struct icl_port_dpll *old_port_dpll =
3014 &old_crtc_state->icl_port_dplls[id];
3015 struct icl_port_dpll *new_port_dpll =
3016 &new_crtc_state->icl_port_dplls[id];
3018 new_port_dpll->pll = NULL;
3020 if (!old_port_dpll->pll)
3023 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3027 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3028 struct intel_shared_dpll *pll,
3029 struct intel_dpll_hw_state *hw_state)
3031 const enum intel_dpll_id id = pll->info->id;
3032 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3033 intel_wakeref_t wakeref;
3037 wakeref = intel_display_power_get_if_enabled(dev_priv,
3038 POWER_DOMAIN_DISPLAY_CORE);
3042 val = I915_READ(MG_PLL_ENABLE(tc_port));
3043 if (!(val & PLL_ENABLE))
3046 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3047 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3049 hw_state->mg_clktop2_coreclkctl1 =
3050 I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3051 hw_state->mg_clktop2_coreclkctl1 &=
3052 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3054 hw_state->mg_clktop2_hsclkctl =
3055 I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3056 hw_state->mg_clktop2_hsclkctl &=
3057 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3058 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3059 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3060 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3062 hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3063 hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3064 hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3065 hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3066 hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3068 hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3069 hw_state->mg_pll_tdc_coldst_bias =
3070 I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3072 if (dev_priv->cdclk.hw.ref == 38400) {
3073 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3074 hw_state->mg_pll_bias_mask = 0;
3076 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3077 hw_state->mg_pll_bias_mask = -1U;
3080 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3081 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3085 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3089 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3090 struct intel_shared_dpll *pll,
3091 struct intel_dpll_hw_state *hw_state,
3092 i915_reg_t enable_reg)
3094 const enum intel_dpll_id id = pll->info->id;
3095 intel_wakeref_t wakeref;
3099 wakeref = intel_display_power_get_if_enabled(dev_priv,
3100 POWER_DOMAIN_DISPLAY_CORE);
3104 val = I915_READ(enable_reg);
3105 if (!(val & PLL_ENABLE))
3108 if (INTEL_GEN(dev_priv) >= 12) {
3109 hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
3110 hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
3112 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3113 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
3114 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
3116 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3117 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3123 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3127 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3128 struct intel_shared_dpll *pll,
3129 struct intel_dpll_hw_state *hw_state)
3131 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3133 if (IS_ELKHARTLAKE(dev_priv) &&
3134 pll->info->id == DPLL_ID_EHL_DPLL4) {
3135 enable_reg = MG_PLL_ENABLE(0);
3138 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3141 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3142 struct intel_shared_dpll *pll,
3143 struct intel_dpll_hw_state *hw_state)
3145 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3148 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3149 struct intel_shared_dpll *pll)
3151 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3152 const enum intel_dpll_id id = pll->info->id;
3153 i915_reg_t cfgcr0_reg, cfgcr1_reg;
3155 if (INTEL_GEN(dev_priv) >= 12) {
3156 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3157 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3159 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3160 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3161 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3163 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3164 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3168 I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3169 I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3170 POSTING_READ(cfgcr1_reg);
3173 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3174 struct intel_shared_dpll *pll)
3176 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3177 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3181 * Some of the following registers have reserved fields, so program
3182 * these with RMW based on a mask. The mask can be fixed or generated
3183 * during the calc/readout phase if the mask depends on some other HW
3184 * state like refclk, see icl_calc_mg_pll_state().
3186 val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3187 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3188 val |= hw_state->mg_refclkin_ctl;
3189 I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3191 val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3192 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3193 val |= hw_state->mg_clktop2_coreclkctl1;
3194 I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3196 val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3197 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3198 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3199 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3200 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3201 val |= hw_state->mg_clktop2_hsclkctl;
3202 I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3204 I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3205 I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3206 I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3207 I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3208 I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3210 val = I915_READ(MG_PLL_BIAS(tc_port));
3211 val &= ~hw_state->mg_pll_bias_mask;
3212 val |= hw_state->mg_pll_bias;
3213 I915_WRITE(MG_PLL_BIAS(tc_port), val);
3215 val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3216 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3217 val |= hw_state->mg_pll_tdc_coldst_bias;
3218 I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3220 POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3223 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3224 struct intel_shared_dpll *pll,
3225 i915_reg_t enable_reg)
3229 val = I915_READ(enable_reg);
3230 val |= PLL_POWER_ENABLE;
3231 I915_WRITE(enable_reg, val);
3234 * The spec says we need to "wait" but it also says it should be
3237 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3238 DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3241 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3242 struct intel_shared_dpll *pll,
3243 i915_reg_t enable_reg)
3247 val = I915_READ(enable_reg);
3249 I915_WRITE(enable_reg, val);
3251 /* Timeout is actually 600us. */
3252 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3253 DRM_ERROR("PLL %d not locked\n", pll->info->id);
3256 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3257 struct intel_shared_dpll *pll)
3259 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3261 if (IS_ELKHARTLAKE(dev_priv) &&
3262 pll->info->id == DPLL_ID_EHL_DPLL4) {
3263 enable_reg = MG_PLL_ENABLE(0);
3266 * We need to disable DC states when this DPLL is enabled.
3267 * This can be done by taking a reference on DPLL4 power
3270 pll->wakeref = intel_display_power_get(dev_priv,
3271 POWER_DOMAIN_DPLL_DC_OFF);
3274 icl_pll_power_enable(dev_priv, pll, enable_reg);
3276 icl_dpll_write(dev_priv, pll);
3279 * DVFS pre sequence would be here, but in our driver the cdclk code
3280 * paths should already be setting the appropriate voltage, hence we do
3284 icl_pll_enable(dev_priv, pll, enable_reg);
3286 /* DVFS post sequence would be here. See the comment above. */
3289 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3290 struct intel_shared_dpll *pll)
3292 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3294 icl_dpll_write(dev_priv, pll);
3297 * DVFS pre sequence would be here, but in our driver the cdclk code
3298 * paths should already be setting the appropriate voltage, hence we do
3302 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3304 /* DVFS post sequence would be here. See the comment above. */
3307 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3308 struct intel_shared_dpll *pll)
3310 i915_reg_t enable_reg =
3311 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3313 icl_pll_power_enable(dev_priv, pll, enable_reg);
3315 icl_mg_pll_write(dev_priv, pll);
3318 * DVFS pre sequence would be here, but in our driver the cdclk code
3319 * paths should already be setting the appropriate voltage, hence we do
3323 icl_pll_enable(dev_priv, pll, enable_reg);
3325 /* DVFS post sequence would be here. See the comment above. */
3328 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3329 struct intel_shared_dpll *pll,
3330 i915_reg_t enable_reg)
3334 /* The first steps are done by intel_ddi_post_disable(). */
3337 * DVFS pre sequence would be here, but in our driver the cdclk code
3338 * paths should already be setting the appropriate voltage, hence we do
3342 val = I915_READ(enable_reg);
3344 I915_WRITE(enable_reg, val);
3346 /* Timeout is actually 1us. */
3347 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3348 DRM_ERROR("PLL %d locked\n", pll->info->id);
3350 /* DVFS post sequence would be here. See the comment above. */
3352 val = I915_READ(enable_reg);
3353 val &= ~PLL_POWER_ENABLE;
3354 I915_WRITE(enable_reg, val);
3357 * The spec says we need to "wait" but it also says it should be
3360 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3361 DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3364 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3365 struct intel_shared_dpll *pll)
3367 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3369 if (IS_ELKHARTLAKE(dev_priv) &&
3370 pll->info->id == DPLL_ID_EHL_DPLL4) {
3371 enable_reg = MG_PLL_ENABLE(0);
3372 icl_pll_disable(dev_priv, pll, enable_reg);
3374 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3379 icl_pll_disable(dev_priv, pll, enable_reg);
3382 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3383 struct intel_shared_dpll *pll)
3385 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3388 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3389 struct intel_shared_dpll *pll)
3391 i915_reg_t enable_reg =
3392 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3394 icl_pll_disable(dev_priv, pll, enable_reg);
3397 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3398 const struct intel_dpll_hw_state *hw_state)
3400 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3401 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3402 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3403 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3404 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3405 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3406 hw_state->cfgcr0, hw_state->cfgcr1,
3407 hw_state->mg_refclkin_ctl,
3408 hw_state->mg_clktop2_coreclkctl1,
3409 hw_state->mg_clktop2_hsclkctl,
3410 hw_state->mg_pll_div0,
3411 hw_state->mg_pll_div1,
3412 hw_state->mg_pll_lf,
3413 hw_state->mg_pll_frac_lock,
3414 hw_state->mg_pll_ssc,
3415 hw_state->mg_pll_bias,
3416 hw_state->mg_pll_tdc_coldst_bias);
/* Enable/disable/readout hooks for ICL+ combo PHY PLLs. */
3419 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3420 .enable = combo_pll_enable,
3421 .disable = combo_pll_disable,
3422 .get_hw_state = combo_pll_get_hw_state,
/* Hooks for the Thunderbolt PLL. */
3425 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3426 .enable = tbt_pll_enable,
3427 .disable = tbt_pll_disable,
3428 .get_hw_state = tbt_pll_get_hw_state,
/* Hooks for the per-Type-C-port MG PHY PLLs. */
3431 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3432 .enable = mg_pll_enable,
3433 .disable = mg_pll_disable,
3434 .get_hw_state = mg_pll_get_hw_state,
/*
 * Ice Lake PLL table. Entry order must match the DPLL id, see the
 * WARN_ON(i != dpll_info[i].id) check in intel_shared_dpll_init().
 */
3437 static const struct dpll_info icl_plls[] = {
3438 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3439 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3440 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3441 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3442 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3443 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3444 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
/* Ice Lake DPLL manager: table plus reserve/release/dump callbacks. */
3448 static const struct intel_dpll_mgr icl_pll_mgr = {
3449 .dpll_info = icl_plls,
3450 .get_dplls = icl_get_dplls,
3451 .put_dplls = icl_put_dplls,
3452 .update_active_dpll = icl_update_active_dpll,
3453 .dump_hw_state = icl_dump_hw_state,
/* Elkhart Lake: combo PLLs only, plus the EHL-specific DPLL4. */
3456 static const struct dpll_info ehl_plls[] = {
3457 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3458 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3459 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
/* EHL has no Type-C ports, so no update_active_dpll hook is needed. */
3463 static const struct intel_dpll_mgr ehl_pll_mgr = {
3464 .dpll_info = ehl_plls,
3465 .get_dplls = icl_get_dplls,
3466 .put_dplls = icl_put_dplls,
3467 .dump_hw_state = icl_dump_hw_state,
/* Tiger Lake: combo + TBT PLLs; Type-C PLL support still pending here. */
3470 static const struct dpll_info tgl_plls[] = {
3471 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3472 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3473 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3474 /* TODO: Add typeC plls */
3478 static const struct intel_dpll_mgr tgl_pll_mgr = {
3479 .dpll_info = tgl_plls,
3480 .get_dplls = icl_get_dplls,
3481 .put_dplls = icl_put_dplls,
3482 .dump_hw_state = icl_dump_hw_state,
3486 * intel_shared_dpll_init - Initialize shared DPLLs
3489 * Initialize shared DPLLs for @dev.
3491 void intel_shared_dpll_init(struct drm_device *dev)
3493 struct drm_i915_private *dev_priv = to_i915(dev);
3494 const struct intel_dpll_mgr *dpll_mgr = NULL;
3495 const struct dpll_info *dpll_info;
3498 if (INTEL_GEN(dev_priv) >= 12)
3499 dpll_mgr = &tgl_pll_mgr;
3500 else if (IS_ELKHARTLAKE(dev_priv))
3501 dpll_mgr = &ehl_pll_mgr;
3502 else if (INTEL_GEN(dev_priv) >= 11)
3503 dpll_mgr = &icl_pll_mgr;
3504 else if (IS_CANNONLAKE(dev_priv))
3505 dpll_mgr = &cnl_pll_mgr;
3506 else if (IS_GEN9_BC(dev_priv))
3507 dpll_mgr = &skl_pll_mgr;
3508 else if (IS_GEN9_LP(dev_priv))
3509 dpll_mgr = &bxt_pll_mgr;
3510 else if (HAS_DDI(dev_priv))
3511 dpll_mgr = &hsw_pll_mgr;
3512 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3513 dpll_mgr = &pch_pll_mgr;
3516 dev_priv->num_shared_dpll = 0;
3520 dpll_info = dpll_mgr->dpll_info;
3522 for (i = 0; dpll_info[i].name; i++) {
3523 WARN_ON(i != dpll_info[i].id);
3524 dev_priv->shared_dplls[i].info = &dpll_info[i];
3527 dev_priv->dpll_mgr = dpll_mgr;
3528 dev_priv->num_shared_dpll = i;
3529 mutex_init(&dev_priv->dpll_lock);
3531 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3535 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3536 * @state: atomic state
3537 * @crtc: CRTC to reserve DPLLs for
3540 * This function reserves all required DPLLs for the given CRTC and encoder
3541 * combination in the current atomic commit @state and the new @crtc atomic
3544 * The new configuration in the atomic commit @state is made effective by
3545 * calling intel_shared_dpll_swap_state().
3547 * The reserved DPLLs should be released by calling
3548 * intel_release_shared_dplls().
3551 * True if all required DPLLs were successfully reserved.
3553 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3554 struct intel_crtc *crtc,
3555 struct intel_encoder *encoder)
3557 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3558 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3560 if (WARN_ON(!dpll_mgr))
3563 return dpll_mgr->get_dplls(state, crtc, encoder);
3567 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3568 * @state: atomic state
3569 * @crtc: crtc from which the DPLLs are to be released
3571 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3572 * from the current atomic commit @state and the old @crtc atomic state.
3574 * The new configuration in the atomic commit @state is made effective by
3575 * calling intel_shared_dpll_swap_state().
3577 void intel_release_shared_dplls(struct intel_atomic_state *state,
3578 struct intel_crtc *crtc)
3580 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3581 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3584 * FIXME: this function is called for every platform having a
3585 * compute_clock hook, even though the platform doesn't yet support
3586 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3592 dpll_mgr->put_dplls(state, crtc);
3596 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3597 * @state: atomic state
3598 * @crtc: the CRTC for which to update the active DPLL
3599 * @encoder: encoder determining the type of port DPLL
3601 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3602 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3603 * DPLL selected will be based on the current mode of the encoder's port.
3605 void intel_update_active_dpll(struct intel_atomic_state *state,
3606 struct intel_crtc *crtc,
3607 struct intel_encoder *encoder)
3609 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3610 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3612 if (WARN_ON(!dpll_mgr))
3615 dpll_mgr->update_active_dpll(state, crtc, encoder);
3619 * intel_dpll_dump_hw_state - write hw_state to dmesg
3620 * @dev_priv: i915 drm device
3621 * @hw_state: hw state to be written to the log
3623 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3625 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3626 const struct intel_dpll_hw_state *hw_state)
3628 if (dev_priv->dpll_mgr) {
3629 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3631 /* fallback for platforms that don't use the shared dpll
3634 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3635 "fp0: 0x%x, fp1: 0x%x\n",