asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drm/dp_mst: Add PBN calculation for DSC modes
[linux.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
index a52f0b13a2c8a1e6ea8f6682fffc74a2bb114319..abc359a20a18912ad6585a8249a51d064f394dad 100644 (file)
 #include "dc.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
+#include "dmub/inc/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
 
 #include "vid.h"
 #include "amdgpu.h"
 #include "amdgpu_ucode.h"
 #include "atom.h"
 #include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#include <drm/drm_hdcp.h>
+#endif
 #include "amdgpu_pm.h"
 
 #include "amd_shared.h"
@@ -67,8 +75,9 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 
 #include "dcn/dcn_1_0_offset.h"
 #include "modules/power/power_helpers.h"
 #include "modules/inc/mod_info_packet.h"
 
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+
 #define FIRMWARE_RAVEN_DMCU            "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
 /**
  * DOC: overview
  *
@@ -143,6 +161,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);
 
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
 /*
  * dm_vblank_get_counter
  *
@@ -263,6 +287,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
 }
 
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
 static void dm_pflip_high_irq(void *interrupt_params)
 {
        struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +438,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
        }
 }
 
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
 static void dm_crtc_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -454,6 +492,70 @@ static void dm_crtc_high_irq(void *interrupt_params)
        }
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/**
+ * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Notify DRM's vblank event handler at VSTARTUP
+ *
+ * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
+ * * We are close enough to VUPDATE - the point of no return for hw
+ * * We are in the fixed portion of variable front porch when vrr is enabled
+ * * We are before VUPDATE, where double-buffered vrr registers are swapped
+ *
+ * It is therefore the correct place to signal vblank, send user flip events,
+ * and update VRR.
+ */
+static void dm_dcn_crtc_high_irq(void *interrupt_params)
+{
+       struct common_irq_params *irq_params = interrupt_params;
+       struct amdgpu_device *adev = irq_params->adev;
+       struct amdgpu_crtc *acrtc;
+       struct dm_crtc_state *acrtc_state;
+       unsigned long flags;
+
+       /* Map the interrupt source back to the CRTC (OTG) that raised it. */
+       acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+       if (!acrtc)
+               return;
+
+       acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+       DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+                               amdgpu_dm_vrr_active(acrtc_state));
+
+       /* CRC capture and vblank accounting happen outside the event lock. */
+       amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+       drm_crtc_handle_vblank(&acrtc->base);
+
+       /*
+        * event_lock protects both the VRR state below and the pending
+        * pageflip completion against concurrent atomic commits.
+        */
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+       /* With variable refresh active, update freesync state and program
+        * the newly computed vmin/vmax into the hardware.
+        */
+       if (acrtc_state->vrr_params.supported &&
+           acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+               mod_freesync_handle_v_update(
+               adev->dm.freesync_module,
+               acrtc_state->stream,
+               &acrtc_state->vrr_params);
+
+               dc_stream_adjust_vmin_vmax(
+                       adev->dm.dc,
+                       acrtc_state->stream,
+                       &acrtc_state->vrr_params.adjust);
+       }
+
+       /* Complete a previously submitted pageflip: deliver the user event
+        * (if any) and release the vblank reference taken at submission.
+        */
+       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+               if (acrtc->event) {
+                       drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+                       acrtc->event = NULL;
+                       drm_crtc_vblank_put(&acrtc->base);
+               }
+               acrtc->pflip_status = AMDGPU_FLIP_NONE;
+       }
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+#endif
+
 static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
 {
@@ -643,14 +745,135 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
        }
 }
 
+/**
+ * dm_dmub_hw_init() - Load DMUB firmware into its framebuffer windows and
+ *                     initialize the DMUB hardware service.
+ * @adev: amdgpu device pointer
+ *
+ * Return: 0 on success or when the ASIC has no DMUB support, negative
+ * errno on failure.
+ */
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+       const struct dmcub_firmware_header_v1_0 *hdr;
+       struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+       struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+       const struct firmware *dmub_fw = adev->dm.dmub_fw;
+       struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+       struct abm *abm = adev->dm.dc->res_pool->abm;
+       struct dmub_srv_hw_params hw_params;
+       enum dmub_status status;
+       const unsigned char *fw_inst_const, *fw_bss_data;
+       uint32_t i, fw_inst_const_size, fw_bss_data_size;
+       bool has_hw_support;
+
+       if (!dmub_srv)
+               /* DMUB isn't supported on the ASIC. */
+               return 0;
+
+       if (!fb_info) {
+               DRM_ERROR("No framebuffer info for DMUB service.\n");
+               return -EINVAL;
+       }
+
+       if (!dmub_fw) {
+               /* Firmware required for DMUB support. */
+               DRM_ERROR("No firmware provided for DMUB.\n");
+               return -EINVAL;
+       }
+
+       status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+       if (status != DMUB_STATUS_OK) {
+               DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+               return -EINVAL;
+       }
+
+       if (!has_hw_support) {
+               DRM_INFO("DMUB unsupported on ASIC\n");
+               return 0;
+       }
+
+       hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+       /* Instruction constants start past the PSP signing header. */
+       fw_inst_const = dmub_fw->data +
+                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+                       PSP_HEADER_BYTES;
+
+       fw_bss_data = dmub_fw->data +
+                     le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+                     le32_to_cpu(hdr->inst_const_bytes);
+
+       /* Copy firmware and bios info into FB memory. */
+       fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+                            PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+       fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+       memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+              fw_inst_const_size);
+       memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+              fw_bss_data_size);
+       memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+              adev->bios_size);
+
+       /* Reset regions that need to be reset. */
+       memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+              fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+       memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+              fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+       memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+              fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+       /* Initialize hardware. */
+       memset(&hw_params, 0, sizeof(hw_params));
+       hw_params.fb_base = adev->gmc.fb_start;
+       hw_params.fb_offset = adev->gmc.aper_base;
+
+       if (dmcu)
+               hw_params.psp_version = dmcu->psp_version;
+
+       for (i = 0; i < fb_info->num_fb; ++i)
+               hw_params.fb[i] = &fb_info->fb[i];
+
+       status = dmub_srv_hw_init(dmub_srv, &hw_params);
+       if (status != DMUB_STATUS_OK) {
+               DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+               return -EINVAL;
+       }
+
+       /* Wait for firmware load to finish; a timeout here is non-fatal. */
+       status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+       if (status != DMUB_STATUS_OK)
+               DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+       /* Init DMCU and ABM if available. */
+       if (dmcu && abm) {
+               dmcu->funcs->dmcu_init(dmcu);
+               abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+       }
+
+       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv) {
+               DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+               return -ENOMEM;
+       }
+
+       DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+                adev->dm.dmcub_fw_version);
+
+       return 0;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       struct dc_callback_init init_params;
+#endif
+       int r;
+
        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;
 
        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       memset(&init_params, 0, sizeof(init_params));
+#endif
 
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
@@ -688,7 +911,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
         */
        if (adev->flags & AMD_IS_APU &&
            adev->asic_type >= CHIP_CARRIZO &&
-           adev->asic_type <= CHIP_RAVEN)
+           adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;
 
        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
@@ -697,11 +920,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;
 
+       if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+               init_data.flags.disable_fractional_pwm = true;
+
        init_data.flags.power_down_display_on_boot = true;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
        init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-#endif
 
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
@@ -713,6 +937,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
+       dc_hardware_init(adev->dm.dc);
+
+       r = dm_dmub_hw_init(adev);
+       if (r) {
+               DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+               goto error;
+       }
+
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
@@ -723,6 +955,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        amdgpu_dm_init_color_mod();
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (adev->asic_type >= CHIP_RAVEN) {
+               adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+               if (!adev->dm.hdcp_workqueue)
+                       DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+               else
+                       DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+               dc_init_callbacks(adev->dm.dc, &init_params);
+       }
+#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
@@ -764,6 +1008,25 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (adev->dm.hdcp_workqueue) {
+               hdcp_destroy(adev->dm.hdcp_workqueue);
+               adev->dm.hdcp_workqueue = NULL;
+       }
+
+       if (adev->dm.dc)
+               dc_deinit_callbacks(adev->dm.dc);
+#endif
+       if (adev->dm.dc->ctx->dmub_srv) {
+               dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+               adev->dm.dc->ctx->dmub_srv = NULL;
+       }
+
+       if (adev->dm.dmub_bo)
+               amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+                                     &adev->dm.dmub_bo_gpu_addr,
+                                     &adev->dm.dmub_bo_cpu_addr);
+
        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
@@ -874,9 +1137,160 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
        return 0;
 }
 
+/* DMUB service callback: read a register through DC's accessor.
+ * @ctx is the struct amdgpu_device passed as user context at creation.
+ */
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+       struct amdgpu_device *adev = ctx;
+
+       return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+/* DMUB service callback: write a register through DC's accessor.
+ * @ctx is the struct amdgpu_device passed as user context at creation.
+ */
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+                                    uint32_t value)
+{
+       struct amdgpu_device *adev = ctx;
+
+       /*
+        * Plain call, not "return dm_write_reg(...)": returning a void
+        * expression from a void function is a constraint violation in
+        * ISO C (C99 6.8.6.4) and only compiles as a GNU extension.
+        */
+       dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+/* Software-side DMUB setup: request and validate the firmware, create the
+ * DMUB service, size its memory regions, and allocate the backing VRAM BO.
+ *
+ * Returns 0 both on success and when DMUB is unsupported or its firmware
+ * is unavailable (DMUB is optional); negative errno on hard failures.
+ * Allocated resources are released in dm_sw_fini()/amdgpu_dm_fini().
+ */
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+       struct dmub_srv_create_params create_params;
+       struct dmub_srv_region_params region_params;
+       struct dmub_srv_region_info region_info;
+       struct dmub_srv_fb_params fb_params;
+       struct dmub_srv_fb_info *fb_info;
+       struct dmub_srv *dmub_srv;
+       const struct dmcub_firmware_header_v1_0 *hdr;
+       const char *fw_name_dmub;
+       enum dmub_asic dmub_asic;
+       enum dmub_status status;
+       int r;
+
+       switch (adev->asic_type) {
+       case CHIP_RENOIR:
+               dmub_asic = DMUB_ASIC_DCN21;
+               fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+               break;
+
+       default:
+               /* ASIC doesn't support DMUB. */
+               return 0;
+       }
+
+       /* Missing or invalid firmware is logged but not fatal — the driver
+        * continues without DMUB.
+        */
+       r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
+       if (r) {
+               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+               return 0;
+       }
+
+       r = amdgpu_ucode_validate(adev->dm.dmub_fw);
+       if (r) {
+               DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
+               return 0;
+       }
+
+       if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+               DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
+               return 0;
+       }
+
+       /* Register the ucode with the PSP loader. */
+       hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+               AMDGPU_UCODE_ID_DMCUB;
+       adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
+       adev->firmware.fw_size +=
+               ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+       DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+                adev->dm.dmcub_fw_version);
+
+       adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+       dmub_srv = adev->dm.dmub_srv;
+
+       if (!dmub_srv) {
+               DRM_ERROR("Failed to allocate DMUB service!\n");
+               return -ENOMEM;
+       }
+
+       memset(&create_params, 0, sizeof(create_params));
+       create_params.user_ctx = adev;
+       create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+       create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+       create_params.asic = dmub_asic;
+
+       /* Create the DMUB service. */
+       status = dmub_srv_create(dmub_srv, &create_params);
+       if (status != DMUB_STATUS_OK) {
+               DRM_ERROR("Error creating DMUB service: %d\n", status);
+               return -EINVAL;
+       }
+
+       /* Calculate the size of all the regions for the DMUB service. */
+       memset(&region_params, 0, sizeof(region_params));
+
+       /* Strip the PSP header/footer from the instruction-constant size. */
+       region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+                                       PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+       region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+       region_params.vbios_size = adev->bios_size;
+       region_params.fw_bss_data =
+               adev->dm.dmub_fw->data +
+               le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+               le32_to_cpu(hdr->inst_const_bytes);
+
+       status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+                                          &region_info);
+
+       if (status != DMUB_STATUS_OK) {
+               DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+               return -EINVAL;
+       }
+
+       /*
+        * Allocate a framebuffer based on the total size of all the regions.
+        * TODO: Move this into GART.
+        */
+       r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+                                   &adev->dm.dmub_bo_gpu_addr,
+                                   &adev->dm.dmub_bo_cpu_addr);
+       if (r)
+               return r;
+
+       /* Rebase the regions on the framebuffer address. */
+       memset(&fb_params, 0, sizeof(fb_params));
+       fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+       fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+       fb_params.region_info = &region_info;
+
+       adev->dm.dmub_fb_info =
+               kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+       fb_info = adev->dm.dmub_fb_info;
+
+       if (!fb_info) {
+               DRM_ERROR(
+                       "Failed to allocate framebuffer info for DMUB service!\n");
+               return -ENOMEM;
+       }
+
+       status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+       if (status != DMUB_STATUS_OK) {
+               DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int dm_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       /* Set up the DMUB service first, then request the DMCU firmware. */
+       r = dm_dmub_sw_init(adev);
+       if (r)
+               return r;
 
        return load_dmcu_fw(adev);
 }
@@ -885,6 +1299,19 @@ static int dm_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       kfree(adev->dm.dmub_fb_info);
+       adev->dm.dmub_fb_info = NULL;
+
+       if (adev->dm.dmub_srv) {
+               dmub_srv_destroy(adev->dm.dmub_srv);
+               adev->dm.dmub_srv = NULL;
+       }
+
+       if (adev->dm.dmub_fw) {
+               release_firmware(adev->dm.dmub_fw);
+               adev->dm.dmub_fw = NULL;
+       }
+
        if(adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
@@ -897,27 +1324,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 {
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
        int ret = 0;
 
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
-                                       aconnector, aconnector->base.base.id);
+                                        aconnector,
+                                        aconnector->base.base.id);
 
                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
-                               ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
-                               return ret;
-                               }
+                               aconnector->dc_link->type =
+                                       dc_connection_single;
+                               break;
                        }
+               }
        }
+       drm_connector_list_iter_end(&iter);
 
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
        return ret;
 }
 
@@ -940,6 +1369,11 @@ static int dm_late_init(void *handle)
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;
 
+       /* Min backlight level after ABM reduction; don't allow below 1%
+        * 0xFFFF x 0.01 = 0x28F
+        */
+       params.min_abm_backlight = 0x28F;
+
        /* todo will enable for navi10 */
        if (adev->asic_type <= CHIP_RAVEN) {
                ret = dmcu_load_iram(dmcu, params);
@@ -955,14 +1389,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;
 
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list,
-                           head) {
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
@@ -973,15 +1406,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
-                       ret = drm_dp_mst_topology_mgr_resume(mgr);
+                       ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
-
-       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+       drm_connector_list_iter_end(&iter);
 
        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
@@ -989,7 +1421,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 
 /**
  * dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
  *
  * Initialize the &struct amdgpu_display_manager device. This involves calling
  * the initializers of each DM component, then populating the struct with them.
@@ -1019,7 +1451,7 @@ static int dm_hw_init(void *handle)
 
 /**
  * dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
  *
  * Teardown components within &struct amdgpu_display_manager that require
  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1163,6 +1595,7 @@ static int dm_resume(void *handle)
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
@@ -1171,7 +1604,7 @@ static int dm_resume(void *handle)
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
-       int i;
+       int i, r;
 
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
@@ -1179,23 +1612,29 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
+       /* Before powering on DC we need to re-initialize DMUB. */
+       r = dm_dmub_hw_init(adev);
+       if (r)
+               DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
        /* program HPD filter */
        dc_resume(dm->dc);
 
-       /* On resume we need to  rewrite the MSTM control bits to enamble MST*/
-       s3_handle_mst(ddev, false);
-
        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);
 
+       /* On resume we need to rewrite the MSTM control bits to enable MST*/
+       s3_handle_mst(ddev, false);
+
        /* Do detection*/
-       list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(ddev, &iter);
+       drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
 
                /*
@@ -1223,6 +1662,7 @@ static int dm_resume(void *handle)
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }
+       drm_connector_list_iter_end(&iter);
 
        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1438,6 +1878,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+               /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+               if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+                       connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -1452,6 +1897,9 @@ static void handle_hpd_irq(void *param)
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       struct amdgpu_device *adev = dev->dev_private;
+#endif
 
        /*
         * In case of failure or MST no need to update connector status or notify the OS
@@ -1459,6 +1907,10 @@ static void handle_hpd_irq(void *param)
         */
        mutex_lock(&aconnector->hpd_lock);
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (adev->asic_type >= CHIP_RAVEN)
+               hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
@@ -1577,6 +2029,12 @@ static void handle_hpd_rx_irq(void *param)
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
        enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       union hpd_irq_data hpd_irq_data;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
 
        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1586,7 +2044,12 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);
 
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1621,6 +2084,10 @@ static void handle_hpd_rx_irq(void *param)
                        drm_kms_helper_hotplug_event(dev);
                }
        }
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+               hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
+#endif
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
            (dc_link->type == dc_connection_mst_branch))
                dm_handle_hpd_rx_irq(aconnector);
@@ -1775,7 +2242,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
        return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -1821,35 +2288,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
                c_irq_params->irq_src = int_params.irq_source;
 
                amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                               dm_crtc_high_irq, c_irq_params);
-       }
-
-       /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-        * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-        * to trigger at end of each vblank, regardless of state of the lock,
-        * matching DCE behaviour.
-        */
-       for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-            i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
-            i++) {
-               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
-
-               if (r) {
-                       DRM_ERROR("Failed to add vupdate irq id!\n");
-                       return r;
-               }
-
-               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-               int_params.irq_source =
-                       dc_interrupt_to_irq_source(dc, i, 0);
-
-               c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
-
-               c_irq_params->adev = adev;
-               c_irq_params->irq_src = int_params.irq_source;
-
-               amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                               dm_vupdate_high_irq, c_irq_params);
+                               dm_dcn_crtc_high_irq, c_irq_params);
        }
 
        /* Use GRPH_PFLIP interrupt */
@@ -2334,6 +2773,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
+                       if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+                               amdgpu_dm_set_psr_caps(link);
                }
 
 
@@ -2362,16 +2803,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
        case CHIP_NAVI12:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case CHIP_RENOIR:
-#endif
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
@@ -2517,14 +2954,13 @@ static int dm_early_init(void *handle)
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
 #endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
        case CHIP_NAVI10:
        case CHIP_NAVI12:
                adev->mode_info.num_crtc = 6;
@@ -2536,14 +2972,11 @@ static int dm_early_init(void *handle)
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case CHIP_RENOIR:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
-#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
@@ -2836,14 +3269,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
            adev->asic_type == CHIP_VEGA20 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
            adev->asic_type == CHIP_NAVI10 ||
            adev->asic_type == CHIP_NAVI14 ||
            adev->asic_type == CHIP_NAVI12 ||
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
            adev->asic_type == CHIP_RENOIR ||
-#endif
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                tiling_info->gfx9.num_pipes =
@@ -3161,12 +3590,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
-                                     const struct drm_connector_state *state)
+                                     const struct drm_connector_state *state,
+                                     bool is_y420)
 {
-       uint8_t bpc = (uint8_t)connector->display_info.bpc;
+       uint8_t bpc;
+
+       if (is_y420) {
+               bpc = 8;
 
-       /* Assume 8 bpc by default if no bpc is specified. */
-       bpc = bpc ? bpc : 8;
+               /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+               if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+                       bpc = 16;
+               else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+                       bpc = 12;
+               else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+                       bpc = 10;
+       } else {
+               bpc = (uint8_t)connector->display_info.bpc;
+               /* Assume 8 bpc by default if no bpc is specified. */
+               bpc = bpc ? bpc : 8;
+       }
 
        if (!state)
                state = connector->state;
@@ -3261,27 +3704,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
-
-       timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
-                                               const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+       struct dc_crtc_timing *timing_out,
+       const struct drm_display_info *info)
 {
+       enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                        normalized_clk /= 2;
                /* Adjusting pix clock following on HDMI spec based on colour depth */
-               switch (timing_out->display_color_depth) {
+               switch (depth) {
+               case COLOR_DEPTH_888:
+                       break;
                case COLOR_DEPTH_101010:
                        normalized_clk = (normalized_clk * 30) / 24;
                        break;
@@ -3292,14 +3729,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
                        normalized_clk = (normalized_clk * 48) / 24;
                        break;
                default:
-                       return;
+                       /* The above depths are the only ones valid for HDMI. */
+                       return false;
                }
-               if (normalized_clk <= info->max_tmds_clock)
-                       return;
-               reduce_mode_colour_depth(timing_out);
-
-       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+               if (normalized_clk <= info->max_tmds_clock) {
+                       timing_out->display_color_depth = depth;
+                       return true;
+               }
+       } while (--depth > COLOR_DEPTH_666);
+       return false;
 }
 
 static void fill_stream_properties_from_drm_display_mode(
@@ -3311,8 +3749,12 @@ static void fill_stream_properties_from_drm_display_mode(
 {
        struct dc_crtc_timing *timing_out = &stream->timing;
        const struct drm_display_info *info = &connector->display_info;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct hdmi_vendor_infoframe hv_frame;
+       struct hdmi_avi_infoframe avi_frame;
 
-       memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+       memset(&hv_frame, 0, sizeof(hv_frame));
+       memset(&avi_frame, 0, sizeof(avi_frame));
 
        timing_out->h_border_left = 0;
        timing_out->h_border_right = 0;
@@ -3322,6 +3764,9 @@ static void fill_stream_properties_from_drm_display_mode(
        if (drm_mode_is_420_only(info, mode_in)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+       else if (drm_mode_is_420_also(info, mode_in)
+                       && aconnector->force_yuv420_output)
+               timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
        else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3330,7 +3775,8 @@ static void fill_stream_properties_from_drm_display_mode(
 
        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
-               connector, connector_state);
+               connector, connector_state,
+               (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;
 
@@ -3346,6 +3792,13 @@ static void fill_stream_properties_from_drm_display_mode(
                        timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
        }
 
+       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+               timing_out->vic = avi_frame.video_code;
+               drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+               timing_out->hdmi_vic = hv_frame.vic;
+       }
+
        timing_out->h_addressable = mode_in->crtc_hdisplay;
        timing_out->h_total = mode_in->crtc_htotal;
        timing_out->h_sync_width =
@@ -3365,8 +3818,14 @@ static void fill_stream_properties_from_drm_display_mode(
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-               adjust_colour_depth_from_display_info(timing_out, info);
+       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+                   drm_mode_is_420_also(info, mode_in) &&
+                   timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+                       timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+                       adjust_colour_depth_from_display_info(timing_out, info);
+               }
+       }
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -3535,10 +3994,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
-       uint32_t link_bandwidth_kbps;
 #endif
+       uint32_t link_bandwidth_kbps;
 
        struct dc_sink *sink = NULL;
        if (aconnector == NULL) {
@@ -3566,6 +4025,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        stream->dm_stream_context = aconnector;
 
+       stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+               drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3610,25 +4072,29 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, con_state, old_stream);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
        stream->timing.flags.DSC = 0;
 
        if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
-               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
                                      &dsc_caps);
+#endif
                link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                             dc_link_get_link_cap(aconnector->dc_link));
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dsc_caps.is_dsc_supported)
-                       if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+                       if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                  &dsc_caps,
+                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
                                                  link_bandwidth_kbps,
                                                  &stream->timing,
                                                  &stream->timing.dsc_cfg))
                                stream->timing.flags.DSC = 1;
-       }
 #endif
+       }
 
        update_stream_scaling_settings(&mode, dm_state, stream);
 
@@ -3639,6 +4105,20 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        update_stream_signal(stream, sink);
 
+       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+       if (stream->link->psr_feature_enabled)  {
+               struct dc  *core_dc = stream->link->ctx->dc;
+
+               if (dc_is_dmcu_initialized(core_dc)) {
+                       struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+                       stream->psr_version = dmcu->dmcu_version.psr_version;
+                       mod_build_vsc_infopacket(stream,
+                                       &stream->vsc_infopacket,
+                                       &stream->use_vsc_sdp_for_colorimetry);
+               }
+       }
 finish:
        dc_sink_release(sink);
 
@@ -3727,6 +4207,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int rc;
 
+       /* Do not set vupdate for DCN hardware */
+       if (adev->family > AMDGPU_FAMILY_AI)
+               return 0;
+
        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -3970,7 +4454,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
                state->base.max_requested_bpc = 8;
-
+               state->vcpi_slots = 0;
+               state->pbn = 0;
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        state->abm_level = amdgpu_dm_abm_level;
 
@@ -3998,7 +4483,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
-
+       new_state->vcpi_slots = state->vcpi_slots;
+       new_state->pbn = state->pbn;
        return &new_state->base;
 }
 
@@ -4114,8 +4600,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
                result = MODE_OK;
        else
                DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
-                             mode->vdisplay,
                              mode->hdisplay,
+                             mode->vdisplay,
                              mode->clock,
                              dc_result);
 
@@ -4395,10 +4881,68 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder)
 
 }
 
+static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
+{
+       switch (display_color_depth) {
+               case COLOR_DEPTH_666:
+                       return 6;
+               case COLOR_DEPTH_888:
+                       return 8;
+               case COLOR_DEPTH_101010:
+                       return 10;
+               case COLOR_DEPTH_121212:
+                       return 12;
+               case COLOR_DEPTH_141414:
+                       return 14;
+               case COLOR_DEPTH_161616:
+                       return 16;
+               default:
+                       break;
+               }
+       return 0;
+}
+
 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
 {
+       struct drm_atomic_state *state = crtc_state->state;
+       struct drm_connector *connector = conn_state->connector;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+       struct drm_dp_mst_topology_mgr *mst_mgr;
+       struct drm_dp_mst_port *mst_port;
+       enum dc_color_depth color_depth;
+       int clock, bpp = 0;
+       bool is_y420 = false;
+
+       if (!aconnector->port || !aconnector->dc_sink)
+               return 0;
+
+       mst_port = aconnector->port;
+       mst_mgr = &aconnector->mst_port->mst_mgr;
+
+       if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+               return 0;
+
+       if (!state->duplicated) {
+               is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+                               aconnector->force_yuv420_output;
+               color_depth = convert_color_depth_from_display_info(connector, conn_state,
+                                                                   is_y420);
+               bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+               clock = adjusted_mode->clock;
+               dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+       }
+       dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
+                                                                          mst_mgr,
+                                                                          mst_port,
+                                                                          dm_new_connector_state->pbn);
+       if (dm_new_connector_state->vcpi_slots < 0) {
+               DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+               return dm_new_connector_state->vcpi_slots;
+       }
        return 0;
 }
 
@@ -4494,7 +5038,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        tv.num_shared = 1;
        list_add(&tv.head, &list);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        if (r) {
                dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
                return r;
@@ -4837,7 +5381,13 @@ static int to_drm_connector_type(enum signal_type st)
 
 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
 {
-       return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+       struct drm_encoder *encoder;
+
+       /* There is only one encoder per connector */
+       drm_connector_for_each_possible_encoder(connector, encoder)
+               return encoder;
+
+       return NULL;
 }
 
 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@ -5063,9 +5613,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 
        drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
-       /* This defaults to the max in the range, but we want 8bpc. */
-       aconnector->base.state->max_bpc = 8;
-       aconnector->base.state->max_requested_bpc = 8;
+       /* This defaults to the max in the range, but we want 8bpc for non-edp. */
+       aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+       aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
            dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -5082,6 +5632,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 
                drm_connector_attach_vrr_capable_property(
                        &aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+               if (adev->asic_type >= CHIP_RAVEN)
+                       drm_connector_attach_content_protection_property(&aconnector->base, true);
+#endif
        }
 }
 
@@ -5189,11 +5743,12 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
        connector_type = to_drm_connector_type(link->connector_signal);
 
-       res = drm_connector_init(
+       res = drm_connector_init_with_ddc(
                        dm->ddev,
                        &aconnector->base,
                        &amdgpu_dm_connector_funcs,
-                       connector_type);
+                       connector_type,
+                       &i2c->base);
 
        if (res) {
                DRM_ERROR("connector_init failed\n");
@@ -5324,6 +5879,48 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
        return false;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+                                           const struct drm_connector_state *old_state,
+                                           const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+       if (old_state->hdcp_content_type != state->hdcp_content_type &&
+           state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               return true;
+       }
+
+       /* CP is being re enabled, ignore this */
+       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+           state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               return false;
+       }
+
+       /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
+       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+           state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+       /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
+        * hot-plug, headless s3, dpms
+        */
+       if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+           aconnector->dc_sink != NULL)
+               return true;
+
+       if (old_state->content_protection == state->content_protection)
+               return false;
+
+       if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+               return true;
+
+       return false;
+}
+
+#endif
 static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
@@ -5665,6 +6262,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
+       bool swizzle = true;
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5710,6 +6308,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
                dc_plane = dm_new_plane_state->dc_state;
 
+               if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+                       swizzle = false;
+
                bundle->surface_updates[planes_count].surface = dc_plane;
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -5864,6 +6465,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
+               bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                        bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -5899,14 +6501,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
-
                mutex_lock(&dm->dc_lock);
+               if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+                               acrtc_state->stream->link->psr_allow_active)
+                       amdgpu_dm_psr_disable(acrtc_state->stream);
+
                dc_commit_updates_for_stream(dm->dc,
                                                     bundle->surface_updates,
                                                     planes_count,
                                                     acrtc_state->stream,
                                                     &bundle->stream_update,
                                                     dc_state);
+
+               if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+                                               acrtc_state->stream->psr_version &&
+                                               !acrtc_state->stream->link->psr_feature_enabled)
+                       amdgpu_dm_link_setup_psr(acrtc_state->stream);
+               else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+                                               acrtc_state->stream->link->psr_feature_enabled &&
+                                               !acrtc_state->stream->link->psr_allow_active &&
+                                               swizzle) {
+                       amdgpu_dm_psr_enable(acrtc_state->stream);
+               }
+
                mutex_unlock(&dm->dc_lock);
        }
 
@@ -6215,10 +6832,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        crtc->hwmode = new_crtc_state->mode;
                } else if (modereset_required(new_crtc_state)) {
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-
                        /* i.e. reset mode */
-                       if (dm_old_crtc_state->stream)
+                       if (dm_old_crtc_state->stream) {
+                               if (dm_old_crtc_state->stream->link->psr_allow_active)
+                                       amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
+
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+                       }
                }
        } /* for_each_crtc_in_state() */
 
@@ -6248,6 +6868,34 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+               struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+               struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+               new_crtc_state = NULL;
+
+               if (acrtc)
+                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+               dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+               if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+                   connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+                       hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+                       new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+                       continue;
+               }
+
+               if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+                       hdcp_update_display(
+                               adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+                               new_con_state->hdcp_content_type,
+                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
+                                                                                                        : false);
+       }
+#endif
 
        /* Handle connector state changes */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6287,9 +6935,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                if (!scaling_changed && !abm_changed && !hdr_changed)
                        continue;
 
+               stream_update.stream = dm_new_crtc_state->stream;
                if (scaling_changed) {
                        update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
-                                       dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+                                       dm_new_con_state, dm_new_crtc_state->stream);
 
                        stream_update.src = dm_new_crtc_state->stream->src;
                        stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7034,7 +7683,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
        int i, j, num_plane, ret = 0;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
-       struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+       struct drm_crtc *new_plane_crtc;
        struct drm_plane *plane;
 
        struct drm_crtc *crtc;
@@ -7080,7 +7729,6 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                        uint64_t tiling_flags;
 
                        new_plane_crtc = new_plane_state->crtc;
-                       old_plane_crtc = old_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
                        old_dm_plane_state = to_dm_plane_state(old_plane_state);
 
@@ -7158,7 +7806,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 
                status = dc_stream_get_status_from_state(old_dm_state->context,
                                                         new_dm_crtc_state->stream);
-
+               stream_update.stream = new_dm_crtc_state->stream;
                /*
                 * TODO: DC modifies the surface during this call so we need
                 * to lock here - find a way to do this without locking.
@@ -7336,6 +7984,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        if (ret)
                goto fail;
 
+       /* Perform validation of MST topology in the state*/
+       ret = drm_dp_mst_atomic_check(state);
+       if (ret)
+               goto fail;
+
        if (state->legacy_cursor_update) {
                /*
                 * This is a fast cursor update coming from the plane update
@@ -7569,3 +8222,92 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                                       freesync_capable);
 }
 
+/*
+ * amdgpu_dm_set_psr_caps() - query sink PSR capability over DPCD
+ * @link: link whose sink is probed
+ *
+ * Reads DP_PSR_SUPPORT from the sink's DPCD receiver-capability range and
+ * caches the result in link->psr_feature_enabled. Bails out silently for
+ * non-eDP links (PSR is eDP-only) and for links with nothing connected.
+ */
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+       uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+       /* PSR applies only to embedded DisplayPort panels */
+       if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+               return;
+       /* No sink attached: nothing to query */
+       if (link->type == dc_connection_none)
+               return;
+       /* Byte 0 of the PSR capability block is nonzero when PSR is supported */
+       if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+                                       dpcd_data, sizeof(dpcd_data))) {
+               link->psr_feature_enabled = dpcd_data[0] ? true:false;
+               DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+       }
+}
+
+/*
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Builds a PSR configuration from the DMCU firmware's reported PSR version
+ * and hands it to DC via dc_link_setup_psr(). If the firmware reports no
+ * PSR support (version 0), nothing is programmed and false is returned.
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+       struct dc_link *link = NULL;
+       struct psr_config psr_config = {0};
+       struct psr_context psr_context = {0};
+       struct dc *dc = NULL;
+       bool ret = false;
+
+       if (stream == NULL)
+               return false;
+
+       link = stream->link;
+       dc = link->ctx->dc;
+
+       /* PSR version advertised by the DMCU microcontroller firmware;
+        * 0 means the firmware has no PSR support.
+        */
+       psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+
+       if (psr_config.psr_version > 0) {
+               /* NOTE(review): magic values below (rfb setup time 0x37,
+                * SDP deadline 0x20) appear to be DMCU firmware tuning
+                * parameters — confirm against DMCU/PSR documentation.
+                */
+               psr_config.psr_exit_link_training_required = 0x1;
+               psr_config.psr_frame_capture_indication_req = 0;
+               psr_config.psr_rfb_setup_time = 0x37;
+               psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+               psr_config.allow_smu_optimizations = 0x0;
+
+               ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+
+       }
+       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
+
+       return ret;
+}
+
+/*
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Programs the static-screen event triggers for the stream and then asks
+ * DC to allow the PSR state machine to become active on the link.
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+       struct dc_link *link = stream->link;
+       struct dc_static_screen_events triggers = {0};
+
+       DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+       /* NOTE(review): presumably these events mark the screen as active
+        * so cursor/overlay/surface updates cause a PSR exit — confirm
+        * against dc_stream_set_static_screen_events() semantics.
+        */
+       triggers.cursor_update = true;
+       triggers.overlay_update = true;
+       triggers.surface_update = true;
+
+       dc_stream_set_static_screen_events(link->ctx->dc,
+                                          &stream, 1,
+                                          &triggers);
+
+       return dc_link_set_psr_allow_active(link, true, false);
+}
+
+/*
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * @stream:  stream state
+ *
+ * Thin wrapper: asks DC to deassert psr_allow_active on the stream's link,
+ * waiting for the transition (third argument true).
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+
+       DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+       return dc_link_set_psr_allow_active(stream->link, false, true);
+}