asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/i915/display/intel_display.c
drm/i915: Avoid calling i915_gem_object_unbind holding object lock
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
index 7d6d0393d73ffbdd801d638049209b0433f9677b..37760a3814020ce490e698454e3d97a8b53627a5 100644 (file)
@@ -171,7 +171,6 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
 static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 
 struct intel_limit {
        struct {
@@ -562,6 +561,12 @@ is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
                crtc_state->sync_mode_slaves_mask);
 }
 
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+{
+       return crtc_state->master_transcoder != INVALID_TRANSCODER;
+}
+
 /*
  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -2160,19 +2165,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         * pin/unpin/fence and not more.
         */
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-       i915_gem_object_lock(obj);
 
        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-       pinctl = 0;
-
-       /* Valleyview is definitely limited to scanning out the first
+       /*
+        * Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
+       pinctl = 0;
        if (HAS_GMCH(dev_priv))
                pinctl |= PIN_MAPPABLE;
 
@@ -2184,7 +2188,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;
 
-               /* Install a fence for tiled scan-out. Pre-i965 always needs a
+               /*
+                * Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
@@ -2214,8 +2219,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
        i915_vma_get(vma);
 err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
-       i915_gem_object_unlock(obj);
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        return vma;
 }
@@ -3212,6 +3215,7 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
@@ -3227,7 +3231,27 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
        crtc_state->min_cdclk[plane->id] = 0;
 
        if (plane->id == PLANE_PRIMARY)
-               intel_pre_disable_primary_noatomic(&crtc->base);
+               hsw_disable_ips(crtc_state);
+
+       /*
+        * Vblank time updates from the shadow to live plane control register
+        * are blocked if the memory self-refresh mode is active at that
+        * moment. So to make sure the plane gets truly disabled, disable
+        * first the self-refresh mode. The self-refresh enable bit in turn
+        * will be checked/applied by the HW only at the next frame start
+        * event which is after the vblank start event, so we need to have a
+        * wait-for-vblank between disabling the plane and the pipe.
+        */
+       if (HAS_GMCH(dev_priv) &&
+           intel_set_memory_cxsr(dev_priv, false))
+               intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+       /*
+        * Gen2 reports pipe underruns whenever all planes are disabled.
+        * So disable underrun reporting before all the planes get disabled.
+        */
+       if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 
        intel_disable_plane(plane, crtc_state);
 }
@@ -5908,73 +5932,6 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
         */
 }
 
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS.  Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
-                         const struct intel_crtc_state *new_crtc_state)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-
-       /*
-        * Gen2 reports pipe underruns whenever all planes are disabled.
-        * So don't enable underrun reporting before at least some planes
-        * are enabled.
-        * FIXME: Need to fix the logic to work when we turn off all planes
-        * but leave the pipe running.
-        */
-       if (IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       /* Underruns don't always raise interrupts, so check manually. */
-       intel_check_cpu_fifo_underruns(dev_priv);
-       intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-
-       /*
-        * Gen2 reports pipe underruns whenever all planes are disabled.
-        * So disable underrun reporting before all the planes get disabled.
-        */
-       if (IS_GEN(dev_priv, 2))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
-       /*
-        * Vblank time updates from the shadow to live plane control register
-        * are blocked if the memory self-refresh mode is active at that
-        * moment. So to make sure the plane gets truly disabled, disable
-        * first the self-refresh mode. The self-refresh enable bit in turn
-        * will be checked/applied by the HW only at the next frame start
-        * event which is after the vblank start event, so we need to have a
-        * wait-for-vblank between disabling the plane and the pipe.
-        */
-       if (HAS_GMCH(dev_priv) &&
-           intel_set_memory_cxsr(dev_priv, false))
-               intel_wait_for_vblank(dev_priv, pipe);
-}
-
 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
                                       const struct intel_crtc_state *new_crtc_state)
 {
@@ -6037,9 +5994,10 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
        return !old_crtc_state->ips_enabled;
 }
 
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
-                         const struct intel_crtc_state *crtc_state)
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
        if (!crtc_state->nv12_planes)
                return false;
 
@@ -6050,9 +6008,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
        return false;
 }
 
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
-                              const struct intel_crtc_state *crtc_state)
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
        /* Wa_2006604312:icl */
        if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
                return true;
@@ -6060,89 +6019,81 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
        return false;
 }
 
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+                           const struct intel_crtc_state *new_crtc_state)
 {
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state = old_crtc_state->uapi.state;
-       struct intel_crtc_state *pipe_config =
-               intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
-                                               crtc);
-       struct drm_plane *primary = crtc->base.primary;
-       struct drm_plane_state *old_primary_state =
-               drm_atomic_get_old_plane_state(state, primary);
+       return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+               new_crtc_state->active_planes;
+}
 
-       intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+                            const struct intel_crtc_state *new_crtc_state)
+{
+       return old_crtc_state->active_planes &&
+               (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+}
 
-       if (pipe_config->update_wm_post && pipe_config->hw.active)
-               intel_update_watermarks(crtc);
+static void intel_post_plane_update(struct intel_atomic_state *state,
+                                   struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       const struct intel_plane_state *new_primary_state =
+               intel_atomic_get_new_plane_state(state, primary);
+       enum pipe pipe = crtc->pipe;
+
+       intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
 
-       if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
-               hsw_enable_ips(pipe_config);
+       if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+               intel_update_watermarks(crtc);
 
-       if (old_primary_state) {
-               struct drm_plane_state *new_primary_state =
-                       drm_atomic_get_new_plane_state(state, primary);
+       if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
+               hsw_enable_ips(new_crtc_state);
 
+       if (new_primary_state)
                intel_fbc_post_update(crtc);
 
-               if (new_primary_state->visible &&
-                   (needs_modeset(pipe_config) ||
-                    !old_primary_state->visible))
-                       intel_post_enable_primary(&crtc->base, pipe_config);
-       }
-
-       if (needs_nv12_wa(dev_priv, old_crtc_state) &&
-           !needs_nv12_wa(dev_priv, pipe_config))
-               skl_wa_827(dev_priv, crtc->pipe, false);
+       if (needs_nv12_wa(old_crtc_state) &&
+           !needs_nv12_wa(new_crtc_state))
+               skl_wa_827(dev_priv, pipe, false);
 
-       if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-           !needs_scalerclk_wa(dev_priv, pipe_config))
-               icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+       if (needs_scalerclk_wa(old_crtc_state) &&
+           !needs_scalerclk_wa(new_crtc_state))
+               icl_wa_scalerclkgating(dev_priv, pipe, false);
 }
 
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
-                                  struct intel_crtc_state *pipe_config)
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+                                  struct intel_crtc *crtc)
 {
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_atomic_state *state = old_crtc_state->uapi.state;
-       struct drm_plane *primary = crtc->base.primary;
-       struct drm_plane_state *old_primary_state =
-               drm_atomic_get_old_plane_state(state, primary);
-       bool modeset = needs_modeset(pipe_config);
-       struct intel_atomic_state *intel_state =
-               to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+       struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       const struct intel_plane_state *new_primary_state =
+               intel_atomic_get_new_plane_state(state, primary);
+       enum pipe pipe = crtc->pipe;
 
-       if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+       if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
                hsw_disable_ips(old_crtc_state);
 
-       if (old_primary_state) {
-               struct intel_plane_state *new_primary_state =
-                       intel_atomic_get_new_plane_state(intel_state,
-                                                        to_intel_plane(primary));
-
-               intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
-               /*
-                * Gen2 reports pipe underruns whenever all planes are disabled.
-                * So disable underrun reporting before all the planes get disabled.
-                */
-               if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
-                   (modeset || !new_primary_state->uapi.visible))
-                       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
-       }
+       if (new_primary_state)
+               intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state);
 
        /* Display WA 827 */
-       if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
-           needs_nv12_wa(dev_priv, pipe_config))
-               skl_wa_827(dev_priv, crtc->pipe, true);
+       if (!needs_nv12_wa(old_crtc_state) &&
+           needs_nv12_wa(new_crtc_state))
+               skl_wa_827(dev_priv, pipe, true);
 
        /* Wa_2006604312:icl */
-       if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
-           needs_scalerclk_wa(dev_priv, pipe_config))
-               icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+       if (!needs_scalerclk_wa(old_crtc_state) &&
+           needs_scalerclk_wa(new_crtc_state))
+               icl_wa_scalerclkgating(dev_priv, pipe, true);
 
        /*
         * Vblank time updates from the shadow to live plane control register
@@ -6154,8 +6105,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
-           pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
-               intel_wait_for_vblank(dev_priv, crtc->pipe);
+           new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+               intel_wait_for_vblank(dev_priv, pipe);
 
        /*
         * IVB workaround: must disable low power watermarks for at least
@@ -6164,35 +6115,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
-       if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
-           old_crtc_state->hw.active)
-               intel_wait_for_vblank(dev_priv, crtc->pipe);
+       if (old_crtc_state->hw.active &&
+           new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+               intel_wait_for_vblank(dev_priv, pipe);
 
        /*
-        * If we're doing a modeset, we're done.  No need to do any pre-vblank
-        * watermark programming here.
+        * If we're doing a modeset we don't need to do any
+        * pre-vblank watermark programming here.
         */
-       if (needs_modeset(pipe_config))
-               return;
+       if (!needs_modeset(new_crtc_state)) {
+               /*
+                * For platforms that support atomic watermarks, program the
+                * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
+                * will be the intermediate values that are safe for both pre- and
+                * post- vblank; when vblank happens, the 'active' values will be set
+                * to the final 'target' values and we'll do this again to get the
+                * optimal watermarks.  For gen9+ platforms, the values we program here
+                * will be the final target values which will get automatically latched
+                * at vblank time; no further programming will be necessary.
+                *
+                * If a platform hasn't been transitioned to atomic watermarks yet,
+                * we'll continue to update watermarks the old way, if flags tell
+                * us to.
+                */
+               if (dev_priv->display.initial_watermarks)
+                       dev_priv->display.initial_watermarks(state, crtc);
+               else if (new_crtc_state->update_wm_pre)
+                       intel_update_watermarks(crtc);
+       }
 
        /*
-        * For platforms that support atomic watermarks, program the
-        * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
-        * will be the intermediate values that are safe for both pre- and
-        * post- vblank; when vblank happens, the 'active' values will be set
-        * to the final 'target' values and we'll do this again to get the
-        * optimal watermarks.  For gen9+ platforms, the values we program here
-        * will be the final target values which will get automatically latched
-        * at vblank time; no further programming will be necessary.
+        * Gen2 reports pipe underruns whenever all planes are disabled.
+        * So disable underrun reporting before all the planes get disabled.
         *
-        * If a platform hasn't been transitioned to atomic watermarks yet,
-        * we'll continue to update watermarks the old way, if flags tell
-        * us to.
+        * We do this after .initial_watermarks() so that we have a
+        * chance of catching underruns with the intermediate watermarks
+        * vs. the old plane configuration.
         */
-       if (dev_priv->display.initial_watermarks)
-               dev_priv->display.initial_watermarks(intel_state, crtc);
-       else if (pipe_config->update_wm_pre)
-               intel_update_watermarks(crtc);
+       if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 }
 
 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7203,84 +7164,84 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
                i830_enable_pipe(dev_priv, pipe);
 }
 
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
 {
        struct intel_encoder *encoder;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_crtc_state *crtc_state =
-               to_intel_crtc_state(crtc->state);
+               to_intel_crtc_state(crtc->base.state);
        enum intel_display_power_domain domain;
        struct intel_plane *plane;
-       u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *temp_crtc_state;
+       enum pipe pipe = crtc->pipe;
+       u64 domains;
        int ret;
 
-       if (!intel_crtc->active)
+       if (!crtc_state->hw.active)
                return;
 
-       for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
 
                if (plane_state->uapi.visible)
-                       intel_plane_disable_noatomic(intel_crtc, plane);
+                       intel_plane_disable_noatomic(crtc, plane);
        }
 
-       state = drm_atomic_state_alloc(crtc->dev);
+       state = drm_atomic_state_alloc(&dev_priv->drm);
        if (!state) {
                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
-                             crtc->base.id, crtc->name);
+                             crtc->base.base.id, crtc->base.name);
                return;
        }
 
        state->acquire_ctx = ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
-       temp_crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-       ret = drm_atomic_add_affected_connectors(state, crtc);
+       temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+       ret = drm_atomic_add_affected_connectors(state, &crtc->base);
 
        WARN_ON(IS_ERR(temp_crtc_state) || ret);
 
-       dev_priv->display.crtc_disable(to_intel_atomic_state(state),
-                                      intel_crtc);
+       dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
 
        drm_atomic_state_put(state);
 
        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
-                     crtc->base.id, crtc->name);
-
-       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
-       crtc->state->active = false;
-       intel_crtc->active = false;
-       crtc->enabled = false;
-       crtc->state->connector_mask = 0;
-       crtc->state->encoder_mask = 0;
+                     crtc->base.base.id, crtc->base.name);
+
+       crtc->active = false;
+       crtc->base.enabled = false;
+
+       WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+       crtc_state->uapi.active = false;
+       crtc_state->uapi.connector_mask = 0;
+       crtc_state->uapi.encoder_mask = 0;
        intel_crtc_free_hw_state(crtc_state);
        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
 
-       for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+       for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
                encoder->base.crtc = NULL;
 
-       intel_fbc_disable(intel_crtc);
-       intel_update_watermarks(intel_crtc);
-       intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+       intel_fbc_disable(crtc);
+       intel_update_watermarks(crtc);
+       intel_disable_shared_dpll(crtc_state);
 
-       domains = intel_crtc->enabled_power_domains;
+       domains = crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
-       intel_crtc->enabled_power_domains = 0;
+       crtc->enabled_power_domains = 0;
 
-       dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
-       dev_priv->min_cdclk[intel_crtc->pipe] = 0;
-       dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+       dev_priv->active_pipes &= ~BIT(pipe);
+       dev_priv->min_cdclk[pipe] = 0;
+       dev_priv->min_voltage_level[pipe] = 0;
 
-       bw_state->data_rate[intel_crtc->pipe] = 0;
-       bw_state->num_active_planes[intel_crtc->pipe] = 0;
+       bw_state->data_rate[pipe] = 0;
+       bw_state->num_active_planes[pipe] = 0;
 }
 
 /*
@@ -14264,7 +14225,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-       if (!IS_GEN(dev_priv, 2))
+       if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
 
        if (crtc_state->has_pch_encoder) {
@@ -14363,7 +14324,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
 
-               intel_pre_plane_update(old_crtc_state, new_crtc_state);
+               intel_pre_plane_update(state, crtc);
 
                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
@@ -14429,13 +14390,6 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);
 
-       /*
-        * Underruns don't always raise interrupts,
-        * so check manually.
-        */
-       intel_check_cpu_fifo_underruns(dev_priv);
-       intel_check_pch_fifo_underruns(dev_priv);
-
        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
@@ -14443,77 +14397,47 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                dev_priv->display.initial_watermarks(state, crtc);
 }
 
-static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
-                                                  struct intel_crtc *crtc,
-                                                  struct intel_crtc_state *old_crtc_state,
-                                                  struct intel_crtc_state *new_crtc_state)
-{
-       struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
-       struct intel_crtc_state *new_slave_crtc_state =
-               intel_atomic_get_new_crtc_state(state, slave_crtc);
-       struct intel_crtc_state *old_slave_crtc_state =
-               intel_atomic_get_old_crtc_state(state, slave_crtc);
-
-       WARN_ON(!slave_crtc || !new_slave_crtc_state ||
-               !old_slave_crtc_state);
-
-       /* Disable Slave first */
-       intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
-       if (old_slave_crtc_state->hw.active)
-               intel_old_crtc_state_disables(state,
-                                             old_slave_crtc_state,
-                                             new_slave_crtc_state,
-                                             slave_crtc);
-
-       /* Disable Master */
-       intel_pre_plane_update(old_crtc_state, new_crtc_state);
-       if (old_crtc_state->hw.active)
-               intel_old_crtc_state_disables(state,
-                                             old_crtc_state,
-                                             new_crtc_state,
-                                             crtc);
-}
-
 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
 {
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
+       u32 handled = 0;
        int i;
 
-       /*
-        * Disable CRTC/pipes in reverse order because some features(MST in
-        * TGL+) requires master and slave relationship between pipes, so it
-        * should always pick the lowest pipe as master as it will be enabled
-        * first and disable in the reverse order so the master will be the
-        * last one to be disabled.
-        */
-       for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
-                                                   new_crtc_state, i) {
+       /* Only disable port sync slaves */
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;
 
+               if (!old_crtc_state->hw.active)
+                       continue;
+
                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
-               if (is_trans_port_sync_mode(new_crtc_state)) {
-                       if (is_trans_port_sync_master(new_crtc_state))
-                               intel_trans_port_sync_modeset_disables(state,
-                                                                      crtc,
-                                                                      old_crtc_state,
-                                                                      new_crtc_state);
-                       else
-                               continue;
-               } else {
-                       intel_pre_plane_update(old_crtc_state, new_crtc_state);
+               if (!is_trans_port_sync_slave(old_crtc_state))
+                       continue;
 
-                       if (old_crtc_state->hw.active)
-                               intel_old_crtc_state_disables(state,
-                                                             old_crtc_state,
-                                                             new_crtc_state,
-                                                             crtc);
-               }
+               intel_pre_plane_update(state, crtc);
+               intel_old_crtc_state_disables(state, old_crtc_state,
+                                             new_crtc_state, crtc);
+               handled |= BIT(crtc->pipe);
+       }
+
+       /* Disable everything else left on */
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               if (!needs_modeset(new_crtc_state) ||
+                   (handled & BIT(crtc->pipe)))
+                       continue;
+
+               intel_pre_plane_update(state, crtc);
+               if (old_crtc_state->hw.active)
+                       intel_old_crtc_state_disables(state, old_crtc_state,
+                                                     new_crtc_state, crtc);
        }
 }
 
@@ -14653,7 +14577,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
-               if (new_crtc_state->hw.active)
+               if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
                        entries[i] = old_crtc_state->wm.skl.ddb;
 
        /* If 2nd DBuf slice required, enable it here */
@@ -14888,13 +14812,25 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
-       for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+                                           new_crtc_state, i) {
+               /*
+                * Gen2 reports pipe underruns whenever all planes are disabled.
+                * So re-enable underrun reporting after some planes get enabled.
+                *
+                * We do this before .optimize_watermarks() so that we have a
+                * chance of catching underruns with the intermediate watermarks
+                * vs. the new plane configuration.
+                */
+               if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+                       intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state, crtc);
        }
 
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               intel_post_plane_update(old_crtc_state);
+               intel_post_plane_update(state, crtc);
 
                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -14902,6 +14838,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }
 
+       /* Underruns don't always raise interrupts, so check manually */
+       intel_check_cpu_fifo_underruns(dev_priv);
+       intel_check_pch_fifo_underruns(dev_priv);
+
        if (state->modeset)
                intel_verify_planes(state);
 
@@ -17272,7 +17212,8 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
                val |= TRANS_FRAME_START_DELAY(0);
                I915_WRITE(reg, val);
        } else {
-               i915_reg_t reg = TRANS_CHICKEN2(crtc->pipe);
+               enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
+               i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
                u32 val;
 
                val = I915_READ(reg);
@@ -17318,7 +17259,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
-               intel_crtc_disable_noatomic(&crtc->base, ctx);
+               intel_crtc_disable_noatomic(crtc, ctx);
 
        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
                /*
@@ -17946,6 +17887,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
         */
        intel_hpd_poll_fini(i915);
 
+       /*
+        * MST topology needs to be suspended so we don't have any calls to
+        * fbdev after it's finalized. MST will be destroyed later as part of
+        * drm_mode_config_cleanup()
+        */
+       intel_dp_mst_suspend(i915);
+
        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_fbdev_fini(i915);