drm/amd/powerplay: properly set mp1 state for SW SMU suspend/reset routine
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 93cd969e5cf5c6a1c655dbea760d62052c17653f..be01b88db3ec7444ff453a87cf70dbec1875ae8f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
 #include "amdgpu_smu.h"
 #include "soc15_common.h"
 #include "smu_v11_0.h"
+#include "smu_v12_0.h"
 #include "atom.h"
 #include "amd_pcie.h"
 
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type)  #type
+static const char *__smu_message_names[] = {
+       SMU_MESSAGE_TYPES
+};
+
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
+{
+       if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+               return "unknown smu message";
+       return __smu_message_names[type];
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea)   #fea
+static const char *__smu_feature_names[] = {
+       SMU_FEATURE_MASKS
+};
+
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
+{
+       if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+               return "unknown smu feature";
+       return __smu_feature_names[feature];
+}
+
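
The two name tables above rely on an X-macro: smu_types.h defines SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS as lists of __SMU_DUMMY_MAP(...) invocations, so redefining __SMU_DUMMY_MAP to the stringizing form #type re-expands the same list as string literals whose array indices line up with the enum values. A minimal standalone sketch of the pattern (the DEMO_* names are hypothetical, not the kernel's):

    #define DEMO_TYPES               \
        __DEMO_MAP(TestMessage),     \
        __DEMO_MAP(GetSmuVersion)

    #define __DEMO_MAP(type) DEMO_MSG_##type
    enum demo_msg { DEMO_TYPES, DEMO_MSG_MAX_COUNT };   /* enum entries */

    #undef __DEMO_MAP
    #define __DEMO_MAP(type) #type                      /* now: string literals */
    static const char *demo_msg_names[] = { DEMO_TYPES };
    /* demo_msg_names[DEMO_MSG_TestMessage] == "TestMessage" */
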
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
+{
+       size_t size = 0;
+       int ret = 0, i = 0;
+       uint32_t feature_mask[2] = { 0 };
+       int32_t feature_index = 0;
+       uint32_t count = 0;
+       uint32_t sort_feature[SMU_FEATURE_COUNT];
+       uint64_t hw_feature_count = 0;
+
+       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       if (ret)
+               goto failed;
+
+       size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+                       feature_mask[1], feature_mask[0]);
+
+       for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+               feature_index = smu_feature_get_index(smu, i);
+               if (feature_index < 0)
+                       continue;
+               sort_feature[feature_index] = i;
+               hw_feature_count++;
+       }
+
+       for (i = 0; i < hw_feature_count; i++) {
+               size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+                              count++,
+                              smu_get_feature_name(smu, sort_feature[i]),
+                              i,
+                              !!smu_feature_is_enabled(smu, sort_feature[i]) ?
+                              "enabled" : "disabled");
+       }
+
+failed:
+       return size;
+}
+
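
For a sense of what the sprintf formats above produce: this buffer is what amdgpu surfaces through the pp_features sysfs file, and its output looks roughly like the following (feature names and mask values illustrative):

    features high: 0x00000623 low: 0xb3cdaffb
    00. DPM_PREFETCHER        ( 0) : enabled
    01. DPM_GFXCLK            ( 1) : enabled
    02. DPM_UCLK              ( 3) : disabled
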
+static int smu_feature_update_enable_state(struct smu_context *smu,
+                                          uint64_t feature_mask,
+                                          bool enabled)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_low = 0, feature_high = 0;
+       int ret = 0;
+
+       if (!smu->pm_enabled)
+               return ret;
+
+       feature_low = (feature_mask >> 0 ) & 0xffffffff;
+       feature_high = (feature_mask >> 32) & 0xffffffff;
+
+       if (enabled) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+                                                 feature_low);
+               if (ret)
+                       return ret;
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+                                                 feature_high);
+               if (ret)
+                       return ret;
+       } else {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+                                                 feature_low);
+               if (ret)
+                       return ret;
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+                                                 feature_high);
+               if (ret)
+                       return ret;
+       }
+
+       mutex_lock(&feature->mutex);
+       if (enabled)
+               bitmap_or(feature->enabled, feature->enabled,
+                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+       else
+               bitmap_andnot(feature->enabled, feature->enabled,
+                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+       mutex_unlock(&feature->mutex);
+
+       return ret;
+}
+
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+{
+       int ret = 0;
+       uint32_t feature_mask[2] = { 0 };
+       uint64_t feature_2_enabled = 0;
+       uint64_t feature_2_disabled = 0;
+       uint64_t feature_enables = 0;
+
+       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       if (ret)
+               return ret;
+
+       feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
+
+       feature_2_enabled  = ~feature_enables & new_mask;
+       feature_2_disabled = feature_enables & ~new_mask;
+
+       if (feature_2_enabled) {
+               ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
+               if (ret)
+                       return ret;
+       }
+       if (feature_2_disabled) {
+               ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
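
The enable/disable deltas in smu_sys_set_pp_feature_mask() are plain bit arithmetic: bits requested but not yet enabled get turned on, bits enabled but no longer requested get turned off. A self-contained userspace sketch of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t cur = 0x0fULL;  /* features currently enabled */
        uint64_t req = 0x33ULL;  /* mask requested by the user */

        uint64_t to_enable  = ~cur & req;   /* 0x30: requested, still off  */
        uint64_t to_disable = cur & ~req;   /* 0x0c: on, no longer wanted  */

        printf("enable 0x%llx, disable 0x%llx\n",
               (unsigned long long)to_enable,
               (unsigned long long)to_disable);
        return 0;
    }
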
 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
 {
        int ret = 0;
@@ -135,9 +277,8 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max)
 {
-       int ret = 0, clk_id = 0;
-       uint32_t param = 0;
        uint32_t clock_limit;
+       int ret = 0;
 
        if (!min && !max)
                return -EINVAL;
@@ -168,36 +309,11 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 
                return 0;
        }
-
-       mutex_lock(&smu->mutex);
-       clk_id = smu_clk_get_index(smu, clk_type);
-       if (clk_id < 0) {
-               ret = -EINVAL;
-               goto failed;
-       }
-
-       param = (clk_id & 0xffff) << 16;
-
-       if (max) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
-               if (ret)
-                       goto failed;
-               ret = smu_read_smc_arg(smu, max);
-               if (ret)
-                       goto failed;
-       }
-
-       if (min) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
-               if (ret)
-                       goto failed;
-               ret = smu_read_smc_arg(smu, min);
-               if (ret)
-                       goto failed;
-       }
-
-failed:
-       mutex_unlock(&smu->mutex);
+       /*
+        * Todo: have each ASIC (ASIC_ppt funcs) control the callbacks exposed
+        * to the core driver, with helpers for what is common (SMU_v11_x |
+        * SMU_v12_x funcs).
+        */
+       ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
        return ret;
 }
 
@@ -262,7 +378,6 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
        }
 
        if(!smu_feature_is_enabled(smu, feature_id)) {
-               pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
                return false;
        }
 
@@ -315,8 +430,13 @@ int smu_get_power_num_states(struct smu_context *smu,
 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
 {
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
+       if (!data || !size)
+               return -EINVAL;
+
        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;
@@ -338,6 +458,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
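+               /* invert the gating flag: 1 = VCN powered up, 0 = gated */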
+               *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -353,11 +477,12 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
                     void *table_data, bool drv2smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = NULL;
        int ret = 0;
        int table_id = smu_table_get_index(smu, table_index);
 
-       if (!table_data || table_id >= smu_table->table_count)
+       if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;
 
        table = &smu_table->tables[table_index];
@@ -380,6 +505,9 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
        if (ret)
                return ret;
 
+       /* flush the HDP cache so the CPU's and the SMU's views of the table stay coherent */
+       adev->nbio.funcs->hdp_flush(adev, NULL);
+
        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);
 
@@ -390,12 +518,23 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
 {
        if (adev->asic_type == CHIP_VEGA20)
                return (amdgpu_dpm == 2) ? true : false;
-       else if (adev->asic_type >= CHIP_NAVI10)
+       else if (adev->asic_type >= CHIP_ARCTURUS)
                return true;
        else
                return false;
 }
 
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
+{
+       if (amdgpu_dpm != 1)
+               return false;
+
+       if (adev->asic_type == CHIP_VEGA20)
+               return true;
+
+       return false;
+}
+
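
Both predicates key off the amdgpu.dpm module parameter: on Vega20 the SW SMU path is opt-in via dpm=2 (and the XGMI variant requires the legacy dpm=1 path), while everything from CHIP_ARCTURUS onward, which sorts just before Renoir and the Navi entries in amd_asic_type, always takes it. To opt a Vega20 board into the SW SMU path, for example:

    # kernel command line (illustrative)
    amdgpu.dpm=2
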
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -474,13 +613,20 @@ int smu_feature_init_dpm(struct smu_context *smu)
        return ret;
 }
 
+
 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
+       struct amdgpu_device *adev = smu->adev;
        struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_id;
+       int feature_id;
        int ret = 0;
 
+       if (adev->flags & AMD_IS_APU)
+               return 1;
+
        feature_id = smu_feature_get_index(smu, mask);
+       if (feature_id < 0)
+               return 0;
 
        WARN_ON(feature_id > feature->feature_num);
 
@@ -495,36 +641,28 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
                            bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_id;
-       int ret = 0;
+       int feature_id;
 
        feature_id = smu_feature_get_index(smu, mask);
+       if (feature_id < 0)
+               return -EINVAL;
 
        WARN_ON(feature_id > feature->feature_num);
 
-       mutex_lock(&feature->mutex);
-       ret = smu_feature_update_enable_state(smu, feature_id, enable);
-       if (ret)
-               goto failed;
-
-       if (enable)
-               test_and_set_bit(feature_id, feature->enabled);
-       else
-               test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
-       mutex_unlock(&feature->mutex);
-
-       return ret;
+       return smu_feature_update_enable_state(smu,
+                                              1ULL << feature_id,
+                                              enable);
 }
 
 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
 {
        struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_id;
+       int feature_id;
        int ret = 0;
 
        feature_id = smu_feature_get_index(smu, mask);
+       if (feature_id < 0)
+               return 0;
 
        WARN_ON(feature_id > feature->feature_num);
 
@@ -540,10 +678,12 @@ int smu_feature_set_supported(struct smu_context *smu,
                              bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_id;
+       int feature_id;
        int ret = 0;
 
        feature_id = smu_feature_get_index(smu, mask);
+       if (feature_id < 0)
+               return -EINVAL;
 
        WARN_ON(feature_id > feature->feature_num);
 
@@ -564,10 +704,18 @@ static int smu_set_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_VEGA20:
        case CHIP_NAVI10:
+       case CHIP_NAVI14:
+       case CHIP_NAVI12:
+       case CHIP_ARCTURUS:
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
                break;
+       case CHIP_RENOIR:
+               if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+                       smu->od_enabled = true;
+               smu_v12_0_set_smu_funcs(smu);
+               break;
        default:
                return -EINVAL;
        }
@@ -594,6 +742,7 @@ static int smu_late_init(void *handle)
 
        if (!smu->pm_enabled)
                return 0;
+
        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
@@ -762,14 +911,10 @@ static int smu_init_fb_allocations(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
-       uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;
        int32_t ret = 0;
 
-       if (table_count <= 0)
-               return -EINVAL;
-
-       for (i = 0 ; i < table_count; i++) {
+       for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;
                ret = amdgpu_bo_create_kernel(adev,
@@ -800,13 +945,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
-       uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;
 
-       if (table_count == 0 || tables == NULL)
+       if (!tables)
                return 0;
 
-       for (i = 0 ; i < table_count; i++) {
+       for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
@@ -823,6 +967,9 @@ static int smu_override_pcie_parameters(struct smu_context *smu)
        uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
        int ret;
 
+       if (adev->flags & AMD_IS_APU)
+               return 0;
+
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -869,9 +1016,11 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                return 0;
        }
 
-       ret = smu_init_display_count(smu, 0);
-       if (ret)
-               return ret;
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               ret = smu_init_display_count(smu, 0);
+               if (ret)
+                       return ret;
+       }
 
        if (initialize) {
                /* get boot_values from vbios to set revision, gfxclk, and etc. */
@@ -920,6 +1069,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                        return ret;
        }
 
+       /* smu_dump_pptable(smu); */
+
        /*
         * Copy pptable bo in the vram to smc with SMU MSGs such as
         * SetDriverDramAddr and TransferTableDram2Smu.
@@ -928,8 +1079,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
        if (ret)
                return ret;
 
-       /* issue RunAfllBtc msg */
-       ret = smu_run_afll_btc(smu);
+       /* issue Run*Btc msg */
+       ret = smu_run_btc(smu);
        if (ret)
                return ret;
 
@@ -941,21 +1092,23 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
        if (ret)
                return ret;
 
-       ret = smu_override_pcie_parameters(smu);
-       if (ret)
-               return ret;
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               ret = smu_override_pcie_parameters(smu);
+               if (ret)
+                       return ret;
 
-       ret = smu_notify_display_change(smu);
-       if (ret)
-               return ret;
+               ret = smu_notify_display_change(smu);
+               if (ret)
+                       return ret;
 
-       /*
-        * Set min deep sleep dce fclk with bootup value from vbios via
-        * SetMinDeepSleepDcefclk MSG.
-        */
-       ret = smu_set_min_dcef_deep_sleep(smu);
-       if (ret)
-               return ret;
+               /*
+                * Set min deep sleep dce fclk with bootup value from vbios via
+                * SetMinDeepSleepDcefclk MSG.
+                */
+               ret = smu_set_min_dcef_deep_sleep(smu);
+               if (ret)
+                       return ret;
+       }
 
        /*
         * Set initialized values (get from vbios) to dpm tables context such as
@@ -963,7 +1116,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
         * type of clks.
         */
        if (initialize) {
-               ret = smu_populate_smc_pptable(smu);
+               ret = smu_populate_smc_tables(smu);
                if (ret)
                        return ret;
 
@@ -981,7 +1134,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                if (ret)
                        return ret;
 
-               ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+               ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
                if (ret)
                        return ret;
        }
@@ -1066,14 +1219,28 @@ static int smu_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
-       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-               ret = smu_check_fw_status(smu);
-               if (ret) {
-                       pr_err("SMC firmware status is not correct\n");
-                       return ret;
+       if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+               if (adev->asic_type < CHIP_NAVI10) {
+                       ret = smu_load_microcode(smu);
+                       if (ret)
+                               return ret;
                }
        }
 
+       ret = smu_check_fw_status(smu);
+       if (ret) {
+               pr_err("SMC firmware status is not correct\n");
+               return ret;
+       }
+
+       if (adev->flags & AMD_IS_APU) {
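+               /* APUs: bring SDMA and VCN out of powergate before first use */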
+               smu_powergate_sdma(&adev->smu, false);
+               smu_powergate_vcn(&adev->smu, false);
+       }
+
+       if (!smu->pm_enabled)
+               return 0;
+
        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;
@@ -1111,6 +1278,11 @@ static int smu_hw_init(void *handle)
        return ret;
 }
 
+static int smu_stop_dpms(struct smu_context *smu)
+{
+       return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
 static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1118,6 +1290,23 @@ static int smu_hw_fini(void *handle)
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;
 
+       if (adev->flags & AMD_IS_APU) {
+               smu_powergate_sdma(&adev->smu, true);
+               smu_powergate_vcn(&adev->smu, true);
+       }
+
+       ret = smu_stop_thermal_control(smu);
+       if (ret) {
+               pr_warn("Failed to stop thermal control!\n");
+               return ret;
+       }
+
+       ret = smu_stop_dpms(smu);
+       if (ret) {
+               pr_warn("Failed to stop Dpms!\n");
+               return ret;
+       }
+
        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;
 
@@ -1425,6 +1614,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
 
        if (!smu->pm_enabled)
                return -EINVAL;
+
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
@@ -1433,8 +1623,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
                }
        }
 
-       if (!smu->pm_enabled)
-               return -EINVAL;
        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!");
@@ -1453,9 +1641,14 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
                ret = smu_asic_set_performance_level(smu, level);
                if (ret) {
                        ret = smu_default_set_performance_level(smu, level);
+                       if (ret) {
+                               pr_err("Failed to set performance level!");
+                               return ret;
+                       }
                }
-               if (!ret)
-                       smu_dpm_ctx->dpm_level = level;
+
+               /* update the saved copy */
+               smu_dpm_ctx->dpm_level = level;
        }
 
        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1497,6 +1690,42 @@ int smu_handle_task(struct smu_context *smu,
        return ret;
 }
 
+int smu_switch_power_profile(struct smu_context *smu,
+                            enum PP_SMC_POWER_PROFILE type,
+                            bool en)
+{
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       long workload;
+       uint32_t index;
+
+       if (!smu->pm_enabled)
+               return -EINVAL;
+
+       if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
+               return -EINVAL;
+
+       mutex_lock(&smu->mutex);
+
+       if (!en) {
+               smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+               index = fls(smu->workload_mask);
+               index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload = smu->workload_setting[index];
+       } else {
+               smu->workload_mask |= (1 << smu->workload_prority[type]);
+               index = fls(smu->workload_mask);
+               index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload = smu->workload_setting[index];
+       }
+
+       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+               smu_set_power_profile_mode(smu, &workload, 0);
+
+       mutex_unlock(&smu->mutex);
+
+       return 0;
+}
+
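
fls() returns the 1-based position of the most-significant set bit (0 for an empty mask), so the selection above always falls back to the highest-priority profile still requested in workload_mask. A standalone illustration, with fls_demo standing in for the kernel's fls():

    static int fls_demo(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    /*
     * workload_mask 0b10100: profiles with priority 2 and 4 requested.
     * fls_demo(0x14) == 5, so index = 5 - 1 = 4 and workload_setting[4]
     * wins; after dropping it, fls_demo(0b00100) == 3 falls back to
     * workload_setting[2].
     */
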
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1514,28 +1743,18 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
-       int ret = 0;
-       int i;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       int ret = 0;
 
        if (!smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
-       for (i = 0; i < smu->adev->num_ip_blocks; i++) {
-               if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
-                       break;
-       }
-
-
-       smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
-       ret = smu_handle_task(smu, level,
-                             AMD_PP_TASK_READJUST_POWER_STATE);
+       ret = smu_enable_umd_pstate(smu, &level);
        if (ret)
                return ret;
 
-       mutex_lock(&smu->mutex);
-       smu_dpm_ctx->dpm_level = level;
-       mutex_unlock(&smu->mutex);
+       ret = smu_handle_task(smu, level,
+                             AMD_PP_TASK_READJUST_POWER_STATE);
 
        return ret;
 }
@@ -1551,6 +1770,64 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
        return ret;
 }
 
+int smu_force_clk_levels(struct smu_context *smu,
+                        enum smu_clk_type clk_type,
+                        uint32_t mask)
+{
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       int ret = 0;
+
+       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+               pr_debug("force clock level is for dpm manual mode only.\n");
+               return -EINVAL;
+       }
+
+       if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+               ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+       return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+                     enum pp_mp1_state mp1_state)
+{
+       uint16_t msg;
+       int ret;
+
+       /*
+        * The SMC is not fully ready. That may be
+        * expected as the IP may be masked.
+        * So, just return without error.
+        */
+       if (!smu->pm_enabled)
+               return 0;
+
+       switch (mp1_state) {
+       case PP_MP1_STATE_SHUTDOWN:
+               msg = SMU_MSG_PrepareMp1ForShutdown;
+               break;
+       case PP_MP1_STATE_UNLOAD:
+               msg = SMU_MSG_PrepareMp1ForUnload;
+               break;
+       case PP_MP1_STATE_RESET:
+               msg = SMU_MSG_PrepareMp1ForReset;
+               break;
+       case PP_MP1_STATE_NONE:
+       default:
+               return 0;
+       }
+
+       /* some asics may not support those messages */
+       if (smu_msg_get_index(smu, msg) < 0)
+               return 0;
+
+       ret = smu_send_smc_msg(smu, msg);
+       if (ret)
+               pr_err("[PrepareMp1] Failed!\n");
+
+       return ret;
+}
+
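
This helper is the heart of the commit: suspend, driver-unload, and GPU-reset paths can now tell MP1 (the SMU's management processor) what is about to happen instead of skipping the notification on SW SMU parts. A sketch of a caller; the call site is illustrative, since the amdgpu side of the change is not part of this file:

    /* e.g. before triggering a mode-1 reset (illustrative call site) */
    if (is_support_sw_smu(adev)) {
        ret = smu_set_mp1_state(&adev->smu, PP_MP1_STATE_RESET);
        if (ret)
            return ret;
    }
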
 const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
@@ -1578,3 +1855,12 @@ const struct amdgpu_ip_block_version smu_v11_0_ip_block =
        .rev = 0,
        .funcs = &smu_ip_funcs,
 };
+
+const struct amdgpu_ip_block_version smu_v12_0_ip_block =
+{
+       .type = AMD_IP_BLOCK_TYPE_SMC,
+       .major = 12,
+       .minor = 0,
+       .rev = 0,
+       .funcs = &smu_ip_funcs,
+};