asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drm/amd/powerplay: clean up the residual mutex for smu_hw_init
[linux.git] / drivers / gpu / drm / amd / powerplay / amdgpu_smu.c
index a48ca6a4353c3a8ce7bf53f29ca501184e03523b..9559127dd0ece31d5d905bfcd2f7ca32a8b02544 100644 (file)
@@ -20,9 +20,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+
+#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "soc15_common.h"
@@ -60,6 +60,167 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
        return ret;
 }
 
+/*
+ * smu_set_soft_freq_range - program the soft min/max frequency limits
+ * @smu:      smu context
+ * @clk_type: clock domain to constrain
+ * @min:      soft minimum frequency in MHz (0 = leave unchanged)
+ * @max:      soft maximum frequency in MHz (0 = leave unchanged)
+ *
+ * At least one of @min/@max must be non-zero.
+ * Returns 0 on success or a negative error code.
+ */
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t min, uint32_t max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       /* min/max are unsigned, so test for zero rather than "<= 0" */
+       if (min == 0 && max == 0)
+               return -EINVAL;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       if (max > 0) {
+               /* message argument packs clock id in the high 16 bits */
+               param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       if (min > 0) {
+               param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+/*
+ * smu_set_hard_freq_range - program the hard min/max frequency limits
+ * @smu:      smu context
+ * @clk_type: clock domain to constrain
+ * @min:      hard minimum frequency in MHz (0 = leave unchanged)
+ * @max:      hard maximum frequency in MHz (0 = leave unchanged)
+ *
+ * At least one of @min/@max must be non-zero.
+ * Returns 0 on success or a negative error code.
+ */
+int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t min, uint32_t max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       /* min/max are unsigned, so test for zero rather than "<= 0" */
+       if (min == 0 && max == 0)
+               return -EINVAL;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       if (max > 0) {
+               /* message argument packs clock id in the high 16 bits */
+               param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       if (min > 0) {
+               param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+                                                 param);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+/*
+ * smu_get_dpm_freq_range - query the min and/or max DPM frequency of a clock
+ * @smu:      smu context
+ * @clk_type: clock domain to query
+ * @min:      out: minimum frequency, may be NULL if not wanted
+ * @max:      out: maximum frequency, may be NULL if not wanted
+ *
+ * Returns 0 on success or a negative error code.
+ * NOTE(review): when UCLK/GFXCLK DPM is disabled this returns 0 without
+ * writing *min or *max — callers must pre-initialize the outputs.
+ */
+int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+                          uint32_t *min, uint32_t *max)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param = 0;
+
+       /* at least one output pointer is required */
+       if (!min && !max)
+               return -EINVAL;
+
+       switch (clk_type) {
+       case SMU_UCLK:
+               if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+                       pr_warn("uclk dpm is not enabled\n");
+                       return 0;
+               }
+               break;
+       case SMU_GFXCLK:
+               if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+                       pr_warn("gfxclk dpm is not enabled\n");
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+
+       /* serialize the message/argument round-trips with the SMC */
+       mutex_lock(&smu->mutex);
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0) {
+               ret = -EINVAL;
+               goto failed;
+       }
+
+       /* message argument carries the clock id in the high 16 bits */
+       param = (clk_id & 0xffff) << 16;
+
+       if (max) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+               if (ret)
+                       goto failed;
+               ret = smu_read_smc_arg(smu, max);
+               if (ret)
+                       goto failed;
+       }
+
+       if (min) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+               if (ret)
+                       goto failed;
+               ret = smu_read_smc_arg(smu, min);
+               if (ret)
+                       goto failed;
+       }
+
+/* reached on success as well; the mutex is released on every path */
+failed:
+       mutex_unlock(&smu->mutex);
+       return ret;
+}
+
+/*
+ * smu_get_dpm_freq_by_index - read the frequency of a given DPM level
+ * @smu:      smu context
+ * @clk_type: clock domain to query
+ * @level:    DPM level index (0xff asks for the level count instead)
+ * @value:    out: frequency (or level count) reported by the SMC
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
+                             uint16_t level, uint32_t *value)
+{
+       int ret = 0, clk_id = 0;
+       uint32_t param;
+
+       if (!value)
+               return -EINVAL;
+
+       clk_id = smu_clk_get_index(smu, clk_type);
+       if (clk_id < 0)
+               return clk_id;
+
+       /* clock id in the high 16 bits, level index in the low 16 bits */
+       param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+                                         param);
+       if (ret)
+               return ret;
+
+       ret = smu_read_smc_arg(smu, &param);
+       if (ret)
+               return ret;
+
+       /*
+        * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
+        * Discrete DPM is not supported yet, so mask the flag off.
+        */
+       *value = param & 0x7fffffff;
+
+       return ret;
+}
+
+/*
+ * smu_get_dpm_level_count - get the number of DPM levels for a clock
+ *
+ * Passing level 0xff makes GetDpmFreqByIndex report the level count
+ * instead of a frequency (implied by this wrapper — confirm against the
+ * SMC message spec).
+ */
+int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+                           uint32_t *value)
+{
+       return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+}
+
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
 {
@@ -72,6 +233,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
+       case AMD_IP_BLOCK_TYPE_GFX:
+               ret = smu_gfx_off_control(smu, gate);
+               break;
        default:
                break;
        }
@@ -116,6 +280,14 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
+       case AMDGPU_PP_SENSOR_UVD_POWER:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VCE_POWER:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -127,20 +299,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
        return ret;
 }
 
-int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
                     void *table_data, bool drv2smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
-       uint32_t table_index;
+       int table_id = smu_table_get_index(smu, table_index);
 
        if (!table_data || table_id >= smu_table->table_count)
                return -EINVAL;
 
-       table_index = (exarg << 16) | table_id;
-
-       table = &smu_table->tables[table_id];
+       table = &smu_table->tables[table_index];
 
        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);
@@ -156,7 +326,7 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
-                                         table_index);
+                                         table_id);
        if (ret)
                return ret;
 
@@ -168,13 +338,12 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
 
 bool is_support_sw_smu(struct amdgpu_device *adev)
 {
-       if (amdgpu_dpm != 1)
-               return false;
-
-       if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+       if (adev->asic_type == CHIP_VEGA20)
+               return (amdgpu_dpm == 2) ? true : false;
+       else if (adev->asic_type >= CHIP_NAVI10)
                return true;
-
-       return false;
+       else
+               return false;
 }
 
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
@@ -233,33 +402,36 @@ int smu_feature_init_dpm(struct smu_context *smu)
 {
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
-       uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+       uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
        if (!smu->pm_enabled)
                return ret;
        mutex_lock(&feature->mutex);
-       bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+       bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
 
-       ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+       ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                             SMU_FEATURE_MAX/32);
        if (ret)
                return ret;
 
        mutex_lock(&feature->mutex);
-       bitmap_andnot(feature->allowed, feature->allowed,
-                     (unsigned long *)unallowed_feature_mask,
+       bitmap_or(feature->allowed, feature->allowed,
+                     (unsigned long *)allowed_feature_mask,
                      feature->feature_num);
        mutex_unlock(&feature->mutex);
 
        return ret;
 }
 
-int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -269,11 +441,15 @@ int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
        return ret;
 }
 
-int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
+                           bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -292,11 +468,14 @@ int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable
        return ret;
 }
 
-int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -306,12 +485,16 @@ int smu_feature_is_supported(struct smu_context *smu, int feature_id)
        return ret;
 }
 
-int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+int smu_feature_set_supported(struct smu_context *smu,
+                             enum smu_feature_mask mask,
                              bool enable)
 {
        struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_id;
        int ret = 0;
 
+       feature_id = smu_feature_get_index(smu, mask);
+
        WARN_ON(feature_id > feature->feature_num);
 
        mutex_lock(&feature->mutex);
@@ -331,7 +514,6 @@ static int smu_set_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_VEGA20:
        case CHIP_NAVI10:
-               adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
@@ -623,7 +805,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                return 0;
        }
 
-       ret = smu_init_display(smu);
+       ret = smu_init_display_count(smu, 0);
        if (ret)
                return ret;
 
@@ -722,7 +904,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
                        return ret;
        }
 
-       ret = smu_set_od8_default_settings(smu, initialize);
+       ret = smu_set_default_od_settings(smu, initialize);
        if (ret)
                return ret;
 
@@ -824,8 +1006,6 @@ static int smu_hw_init(void *handle)
                }
        }
 
-       mutex_lock(&smu->mutex);
-
        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;
@@ -850,19 +1030,20 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       mutex_unlock(&smu->mutex);
+       ret = smu_register_irq_handler(smu);
+       if (ret)
+               goto failed;
 
        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
-               adev->pm.dpm_enabled = true;
+               adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
 
        pr_info("SMU is initialized successfully!\n");
 
        return 0;
 
 failed:
-       mutex_unlock(&smu->mutex);
        return ret;
 }
 
@@ -879,20 +1060,11 @@ static int smu_hw_fini(void *handle)
        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;
 
-       kfree(table_context->od_feature_capabilities);
-       table_context->od_feature_capabilities = NULL;
-
-       kfree(table_context->od_settings_max);
-       table_context->od_settings_max = NULL;
-
-       kfree(table_context->od_settings_min);
-       table_context->od_settings_min = NULL;
-
        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;
 
-       kfree(table_context->od8_settings);
-       table_context->od8_settings = NULL;
+       kfree(smu->irq_source);
+       smu->irq_source = NULL;
 
        ret = smu_fini_fb_allocations(smu);
        if (ret)
@@ -933,6 +1105,10 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
+       if (adev->asic_type >= CHIP_NAVI10 &&
+           adev->gfx.rlc.funcs->stop)
+               adev->gfx.rlc.funcs->stop(adev);
+
        return 0;
 }
 
@@ -1243,6 +1419,61 @@ int smu_handle_task(struct smu_context *smu,
        return ret;
 }
 
+/*
+ * smu_get_performance_level - report the currently forced DPM level
+ *
+ * Also refreshes saved_dpm_level so it tracks the live dpm_level.
+ * Returns the current level, or -EINVAL (cast into the enum) when the
+ * DPM context has not been initialized.
+ */
+enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+{
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       mutex_lock(&(smu->mutex));
+       /* keep the saved level in sync with the currently applied level */
+       if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+               smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+       }
+       mutex_unlock(&(smu->mutex));
+
+       /* NOTE(review): read outside the lock; a concurrent change may race */
+       return smu_dpm_ctx->dpm_level;
+}
+
+/*
+ * smu_force_performance_level - force the DPM level and readjust power state
+ * @smu:   smu context
+ * @level: requested amd_dpm_forced_level (may be adjusted by umd pstate hook)
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+       int i;
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       /* locate the SMC IP block */
+       for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+               if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+                       break;
+       }
+
+       /* bail out instead of indexing one past the end of ip_blocks[] */
+       if (i == smu->adev->num_ip_blocks)
+               return -EINVAL;
+
+       smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+       ret = smu_handle_task(smu, level,
+                             AMD_PP_TASK_READJUST_POWER_STATE);
+       if (ret)
+               return ret;
+
+       mutex_lock(&smu->mutex);
+       smu_dpm_ctx->dpm_level = level;
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
+
+/*
+ * smu_set_display_count - tell the SMC how many displays are active
+ * @smu:   smu context
+ * @count: number of active displays
+ *
+ * Serializes the underlying smu_init_display_count() call with smu->mutex.
+ * Returns 0 on success or a negative error code.
+ */
+int smu_set_display_count(struct smu_context *smu, uint32_t count)
+{
+       int ret = 0;
+
+       mutex_lock(&smu->mutex);
+       ret = smu_init_display_count(smu, count);
+       mutex_unlock(&smu->mutex);
+
+       return ret;
+}
+
 const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,