drm/amdgpu/smu11: add support for navi14
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index eec329ab6037039d2a5853e0c914b5f48284b278..d977d68320c977c7a3786c670f8f5622dce08aab 100644 (file)
@@ -20,9 +20,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+
+#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "soc15_common.h"
 #include "atom.h"
 #include "amd_pcie.h"
 
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+{
+       int ret = 0;
+
+       if (!if_version && !smu_version)
+               return -EINVAL;
+
+       if (if_version) {
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+               if (ret)
+                       return ret;
+
+               ret = smu_read_smc_arg(smu, if_version);
+               if (ret)
+                       return ret;
+       }
+
+       if (smu_version) {
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
+               if (ret)
+                       return ret;
+
+               ret = smu_read_smc_arg(smu, smu_version);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
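
Both output pointers are optional: passing NULL skips that query, and NULL for both is rejected with -EINVAL. As a usage sketch (the helper below is hypothetical, not part of this patch):

static void example_dump_smc_versions(struct smu_context *smu)
{
        uint32_t if_version = 0, smu_version = 0;

        /* query the driver-interface and SMU firmware versions */
        if (!smu_get_smc_version(smu, &if_version, &smu_version))
                pr_info("SMU if_version 0x%08x, fw version 0x%08x\n",
                        if_version, smu_version);
}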
+
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t min, uint32_t max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       if (!min && !max)
+               return -EINVAL;
+
+       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       if (max > 0) {
+               param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       if (min > 0) {
+               param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
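
Note the encoding: the clock id goes in the upper 16 bits of the message parameter and the frequency in the lower 16 bits, so requested values are effectively capped at 65535 (MHz in practice). A usage sketch (hypothetical helper and values, assumed to be in MHz):

static int example_pin_gfxclk(struct smu_context *smu)
{
        /* request a 1000..1500 MHz soft window on the graphics clock */
        return smu_set_soft_freq_range(smu, SMU_GFXCLK, 1000, 1500);
}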
+
+int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t min, uint32_t max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       if (!min && !max)
+               return -EINVAL;
+
+       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       if (max > 0) {
+               param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       if (min > 0) {
+               param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
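
smu_set_hard_freq_range() mirrors the soft variant but sends the SetHardMin/MaxByFreq messages, which the firmware is expected to treat as strict bounds rather than hints. A sketch combining the two (hypothetical helper; values assumed in MHz):

static int example_limit_gfxclk(struct smu_context *smu)
{
        int ret;

        /* strict bounds first... */
        ret = smu_set_hard_freq_range(smu, SMU_GFXCLK, 500, 1800);
        if (ret)
                return ret;

        /* ...then a tighter preferred window inside them */
        return smu_set_soft_freq_range(smu, SMU_GFXCLK, 800, 1500);
}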
+
+int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                          uint32_t *min, uint32_t *max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param = 0;
+
+       if (!min && !max)
+               return -EINVAL;
+
+       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
+       mutex_lock(&smu->mutex);
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0) {
+               ret = -EINVAL;
+               goto failed;
+       }
+
+       param = (clk_id & 0xffff) << 16;
+
+       if (max) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+               if (ret)
+                       goto failed;
+               ret = smu_read_smc_arg(smu, max);
+               if (ret)
+                       goto failed;
+       }
+
+       if (min) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+               if (ret)
+                       goto failed;
+               ret = smu_read_smc_arg(smu, min);
+               if (ret)
+                       goto failed;
+       }
+
+failed:
+       mutex_unlock(&smu->mutex);
+       return ret;
+}
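
Unlike the setters, this query takes smu->mutex around the whole min/max exchange. A usage sketch (hypothetical helper; units assumed to be MHz):

static void example_dump_uclk_range(struct smu_context *smu)
{
        uint32_t min = 0, max = 0;

        if (!smu_get_dpm_freq_range(smu, SMU_UCLK, &min, &max))
                pr_info("UCLK DPM range: %u..%u MHz\n", min, max);
}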
+
+int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
+                             uint16_t level, uint32_t *value)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       if (!value)
+               return -EINVAL;
+
+       if (!smu_clk_dpm_is_enabled(smu, clk_type))
+               return 0;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+                                         param);
+       if (ret)
+               return ret;
+
+       ret = smu_read_smc_arg(smu, &param);
+       if (ret)
+               return ret;
+
+       /* BIT31: 0 - fine grained DPM, 1 - discrete DPM
+        * the flag is not used yet, so mask it off
+        */
+       *value = param & 0x7fffffff;
+
+       return ret;
+}
+
+int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t *value)
+{
+       return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+}
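
The level count reuses smu_get_dpm_freq_by_index() with the magic index 0xff, which the firmware answers with the number of discrete levels instead of a frequency. Enumerating every level then looks like this (hypothetical helper):

static void example_dump_socclk_levels(struct smu_context *smu)
{
        uint32_t count = 0, freq = 0, i;

        if (smu_get_dpm_level_count(smu, SMU_SOCCLK, &count))
                return;

        for (i = 0; i < count; i++) {
                if (smu_get_dpm_freq_by_index(smu, SMU_SOCCLK,
                                              (uint16_t)i, &freq))
                        return;
                pr_info("SOCCLK level %u: %u MHz\n", i, freq);
        }
}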
+
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+       enum smu_feature_mask feature_id = 0;
+
+       switch (clk_type) {
+       case SMU_MCLK:
+       case SMU_UCLK:
+               feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+               break;
+       case SMU_SOCCLK:
+               feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+               break;
+       default:
+               return true;
+       }
+
+       if (!smu_feature_is_enabled(smu, feature_id)) {
+               pr_warn("smu clk %d dpm feature %d is not enabled\n", clk_type, feature_id);
+               return false;
+       }
+
+       return true;
+}
+
+
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
 {
@@ -42,6 +257,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
+       case AMD_IP_BLOCK_TYPE_GFX:
+               ret = smu_gfx_off_control(smu, gate);
+               break;
        default:
                break;
        }
@@ -86,6 +304,14 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
+       case AMDGPU_PP_SENSOR_UVD_POWER:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VCE_POWER:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -97,20 +323,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
        return ret;
 }
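
The two new sensor cases report UVD/VCE power state by checking whether the corresponding DPM feature bit is enabled. A usage sketch (hypothetical helper; the size argument's type is assumed here to be uint32_t *):

static void example_read_uvd_power(struct smu_context *smu)
{
        uint32_t value = 0;
        uint32_t size = sizeof(value);

        if (!smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_UVD_POWER,
                                    &value, &size))
                pr_info("UVD power: %s\n", value ? "on" : "off");
}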
 
-int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
                     void *table_data, bool drv2smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
-       uint32_t table_index;
+       int table_id = smu_table_get_index(smu, table_index);
 
        if (!table_data || table_id >= smu_table->table_count)
                return -EINVAL;
 
-       table_index = (exarg << 16) | table_id;
-
-       table = &smu_table->tables[table_id];
+       table = &smu_table->tables[table_index];
 
        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);
@@ -126,7 +350,7 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
-                                         table_index);
+                                         table_id | ((argument & 0xFFFF) << 16));
        if (ret)
                return ret;
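
The message parameter now packs the firmware table id into the low 16 bits and the caller's argument into the high 16 bits, replacing the old exarg plumbing. Equivalently (illustrative helper only):

static uint32_t example_pack_transfer_param(int table_id, int argument)
{
        /* low 16 bits: firmware table id; high 16 bits: caller argument */
        return (uint32_t)(table_id | ((argument & 0xFFFF) << 16));
}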
 
@@ -138,13 +362,12 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
 
 bool is_support_sw_smu(struct amdgpu_device *adev)
 {
-       if (amdgpu_dpm != 1)
-               return false;
-
-       if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+       if (adev->asic_type == CHIP_VEGA20)
+               return amdgpu_dpm == 2;
+       else if (adev->asic_type >= CHIP_NAVI10)
                return true;
-
-       return false;
+       else
+               return false;
 }
 
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
@@ -168,6 +391,8 @@ int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;
 
+       if (!smu->pm_enabled)
+               return -EINVAL;
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched !\n");
                return -EIO;
@@ -201,31 +426,36 @@ int smu_feature_init_dpm(struct smu_context *smu)
 {
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
-       uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+       uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
+       if (!smu->pm_enabled)
+               return ret;
        mutex_lock(&feature->mutex);
-       bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+       bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
 
-       ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+       ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                             SMU_FEATURE_MAX/32);
        if (ret)
                return ret;
 
        mutex_lock(&feature->mutex);
-       bitmap_andnot(feature->allowed, feature->allowed,
-                     (unsigned long *)unallowed_feature_mask,
+       bitmap_or(feature->allowed, feature->allowed,
+                     (unsigned long *)allowed_feature_mask,
                      feature->feature_num);
        mutex_unlock(&feature->mutex);
 
        return ret;
 }
 
-int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -235,11 +465,15 @@ int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
        return ret;
 }
 
-int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
+                           bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -258,11 +492,14 @@ int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable
        return ret;
 }
 
-int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -272,12 +509,16 @@ int smu_feature_is_supported(struct smu_context *smu, int feature_id)
        return ret;
 }
 
-int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+int smu_feature_set_supported(struct smu_context *smu,
+                             enum smu_feature_mask mask,
                              bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
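
All four feature accessors now take the ASIC-independent enum smu_feature_mask and translate it to the per-ASIC bit through smu_feature_get_index(), so callers no longer hard-code feature numbers. A guard built on top might look like this (hypothetical helper):

static bool example_uclk_dpm_usable(struct smu_context *smu)
{
        return smu_feature_is_supported(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
               smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT);
}
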
@@ -296,7 +537,8 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
-               adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+       case CHIP_NAVI10:
+       case CHIP_NAVI14:
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
@@ -314,6 +556,7 @@ static int smu_early_init(void *handle)
        struct smu_context *smu = &adev->smu;
 
        smu->adev = adev;
+       smu->pm_enabled = !!amdgpu_dpm;
        mutex_init(&smu->mutex);
 
        return smu_set_funcs(adev);
@@ -323,6 +566,9 @@ static int smu_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
+
+       if (!smu->pm_enabled)
+               return 0;
        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
@@ -406,15 +652,17 @@ static int smu_sw_init(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
-
        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+
+       mutex_init(&smu->smu_baco.mutex);
+       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+       smu->smu_baco.platform_support = false;
+
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -460,9 +708,6 @@ static int smu_sw_fini(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
-
        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
@@ -590,17 +835,17 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                return 0;
        }
 
-       ret = smu_init_display(smu);
+       ret = smu_init_display_count(smu, 0);
        if (ret)
                return ret;
 
        if (initialize) {
-               ret = smu_read_pptable_from_vbios(smu);
+               /* get boot_values from vbios to set revision, gfxclk, etc. */
+               ret = smu_get_vbios_bootup_values(smu);
                if (ret)
                        return ret;
 
-               /* get boot_values from vbios to set revision, gfxclk, and etc. */
-               ret = smu_get_vbios_bootup_values(smu);
+               ret = smu_setup_pptable(smu);
                if (ret)
                        return ret;
 
@@ -612,10 +857,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                 * check if the format_revision in vbios is up to pptable header
                 * version, and the structure size is not 0.
                 */
-               ret = smu_get_clk_info_from_vbios(smu);
-               if (ret)
-                       return ret;
-
                ret = smu_check_pptable(smu);
                if (ret)
                        return ret;
@@ -697,7 +938,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                        return ret;
        }
 
-       ret = smu_set_od8_default_settings(smu, initialize);
+       ret = smu_set_default_od_settings(smu, initialize);
        if (ret)
                return ret;
 
@@ -716,6 +957,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
         */
        ret = smu_set_tool_table_location(smu);
 
+       if (!smu_is_dpm_running(smu))
+               pr_info("dpm has been disabled\n");
+
        return ret;
 }
 
@@ -788,23 +1032,14 @@ static int smu_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
-
-       if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-               ret = smu_load_microcode(smu);
-               if (ret)
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+               ret = smu_check_fw_status(smu);
+               if (ret) {
+                       pr_err("SMC firmware status is not correct\n");
                        return ret;
+               }
        }
 
-       ret = smu_check_fw_status(smu);
-       if (ret) {
-               pr_err("SMC firmware status is not correct\n");
-               return ret;
-       }
-
-       mutex_lock(&smu->mutex);
-
        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;
@@ -829,16 +1064,20 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       mutex_unlock(&smu->mutex);
+       ret = smu_register_irq_handler(smu);
+       if (ret)
+               goto failed;
 
-       adev->pm.dpm_enabled = true;
+       /* TODO: revisit this once VCN and DAL DPM are working */
+       adev->pm.dpm_enabled = smu->pm_enabled;
 
        pr_info("SMU is initialized successfully!\n");
 
        return 0;
 
 failed:
-       mutex_unlock(&smu->mutex);
        return ret;
 }
 
@@ -849,29 +1088,17 @@ static int smu_hw_fini(void *handle)
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;
 
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
-
        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;
 
        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;
 
-       kfree(table_context->od_feature_capabilities);
-       table_context->od_feature_capabilities = NULL;
-
-       kfree(table_context->od_settings_max);
-       table_context->od_settings_max = NULL;
-
-       kfree(table_context->od_settings_min);
-       table_context->od_settings_min = NULL;
-
        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;
 
-       kfree(table_context->od8_settings);
-       table_context->od8_settings = NULL;
+       kfree(smu->irq_source);
+       smu->irq_source = NULL;
 
        ret = smu_fini_fb_allocations(smu);
        if (ret)
@@ -905,16 +1132,26 @@ static int smu_suspend(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
-
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
+       bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
 
        ret = smu_system_features_control(smu, false);
        if (ret)
                return ret;
 
+       if (adev->in_gpu_reset && baco_feature_is_enabled) {
+               ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+               if (ret) {
+                       pr_warn("Failed to enable BACO feature, ret = %d\n", ret);
+                       return ret;
+               }
+       }
+
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
+       if (adev->asic_type >= CHIP_NAVI10 &&
+           adev->gfx.rlc.funcs->stop)
+               adev->gfx.rlc.funcs->stop(adev);
+
        return 0;
 }
 
@@ -924,9 +1161,6 @@ static int smu_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
-       if (!is_support_sw_smu(adev))
-               return -EINVAL;
-
        pr_info("SMU is resuming...\n");
 
        mutex_lock(&smu->mutex);
@@ -955,7 +1189,7 @@ int smu_display_configuration_change(struct smu_context *smu,
        int index = 0;
        int num_of_active_display = 0;
 
-       if (!is_support_sw_smu(smu->adev))
+       if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
                return -EINVAL;
 
        if (!display_config)
@@ -1083,7 +1317,7 @@ static int smu_enable_umd_pstate(void *handle,
 
        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-       if (!smu_dpm_ctx->dpm_context)
+       if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1126,6 +1360,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
+       if (!smu->pm_enabled)
+               return -EINVAL;
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
@@ -1134,6 +1370,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
                }
        }
 
        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!");
@@ -1158,10 +1396,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
                        break;
 
                case AMD_DPM_FORCED_LEVEL_AUTO:
+               case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                        ret = smu_unforce_dpm_levels(smu);
                        break;
 
-               case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1171,8 +1409,9 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                                         &soc_mask);
                        if (ret)
                                return ret;
-                       smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
-                       smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+                       smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+                       smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+                       smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
                        break;
 
                case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1224,6 +1463,60 @@ int smu_handle_task(struct smu_context *smu,
        return ret;
 }
 
+enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+{
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       enum amd_dpm_forced_level level;
+
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       mutex_lock(&(smu->mutex));
+       level = smu_dpm_ctx->dpm_level;
+       mutex_unlock(&(smu->mutex));
+
+       return level;
+}
+
+int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+       int i;
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+               if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+                       break;
+       }
+
+       if (i == smu->adev->num_ip_blocks)
+               return -EINVAL;
+
+       smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+       ret = smu_handle_task(smu, level,
+                             AMD_PP_TASK_READJUST_POWER_STATE);
+       if (ret)
+               return ret;
+
+       mutex_lock(&smu->mutex);
+       smu_dpm_ctx->dpm_level = level;
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
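
smu_force_performance_level() routes the request through the SMC IP block's enable_umd_pstate() hook before readjusting the power state, so the caller side reduces to a single call (hypothetical helper):

static int example_force_low(struct smu_context *smu)
{
        return smu_force_performance_level(smu, AMD_DPM_FORCED_LEVEL_LOW);
}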
+
+int smu_set_display_count(struct smu_context *smu, uint32_t count)
+{
+       int ret = 0;
+
+       mutex_lock(&smu->mutex);
+       ret = smu_init_display_count(smu, count);
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
+
 const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,