asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drm/amd/powerplay: unified VRAM address for driver table interaction with SMU V2
[linux.git] / drivers / gpu / drm / amd / powerplay / smu_v11_0.c
index e4268a627effec922120236d5292803fc0c1a351..e804f98540278fbf5f789133994a51c380490857 100644 (file)
@@ -37,6 +37,7 @@
 #include "soc15_common.h"
 #include "atom.h"
 #include "amd_pcie.h"
+#include "amdgpu_ras.h"
 
 #include "asic_reg/thm/thm_11_0_2_offset.h"
 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -79,15 +80,13 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-                       break;
+                       return cur_value == 0x1 ? 0 : -EIO;
+
                udelay(1);
        }
 
        /* timeout means wrong logic */
-       if (i == timeout)
-               return -ETIME;
-
-       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+       return -ETIME;
 }
 
 int
@@ -103,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                return index;
 
        ret = smu_v11_0_wait_for_response(smu);
-       if (ret)
-               pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-                      smu_get_message_name(smu, msg), index, param, ret);
+       if (ret) {
+               pr_err("Msg issuing pre-check failed and "
+                      "SMU may be not in the right state!\n");
+               return ret;
+       }
 
        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
@@ -449,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 
        kfree(smu_table->tables);
        kfree(smu_table->metrics_table);
+       kfree(smu_table->watermarks_table);
        smu_table->tables = NULL;
        smu_table->metrics_table = NULL;
+       smu_table->watermarks_table = NULL;
        smu_table->metrics_time = 0;
 
        ret = smu_v11_0_fini_dpm_context(smu);
@@ -773,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
        return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
 }
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+       struct smu_table *driver_table = &smu->smu_table.driver_table;
+       int ret = 0;
+
+       if (driver_table->mc_address) {
+               ret = smu_send_smc_msg_with_param(smu,
+                               SMU_MSG_SetDriverDramAddrHigh,
+                               upper_32_bits(driver_table->mc_address));
+               if (!ret)
+                       ret = smu_send_smc_msg_with_param(smu,
+                               SMU_MSG_SetDriverDramAddrLow,
+                               lower_32_bits(driver_table->mc_address));
+       }
+
+       return ret;
+}
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 {
        int ret = 0;
@@ -834,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
                                      uint32_t *feature_mask, uint32_t num)
 {
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
+       struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 
        if (!feature_mask || num < 2)
                return -EINVAL;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-       if (ret)
-               return ret;
-       ret = smu_read_smc_arg(smu, &feature_mask_high);
-       if (ret)
-               return ret;
+       if (bitmap_empty(feature->enabled, feature->feature_num)) {
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+               if (ret)
+                       return ret;
+               ret = smu_read_smc_arg(smu, &feature_mask_high);
+               if (ret)
+                       return ret;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-       if (ret)
-               return ret;
-       ret = smu_read_smc_arg(smu, &feature_mask_low);
-       if (ret)
-               return ret;
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+               if (ret)
+                       return ret;
+               ret = smu_read_smc_arg(smu, &feature_mask_low);
+               if (ret)
+                       return ret;
 
-       feature_mask[0] = feature_mask_low;
-       feature_mask[1] = feature_mask_high;
+               feature_mask[0] = feature_mask_low;
+               feature_mask[1] = feature_mask_high;
+       } else {
+               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+                            feature->feature_num);
+       }
 
        return ret;
 }
@@ -866,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
        uint32_t feature_mask[2];
        int ret = 0;
 
-       if (smu->pm_enabled) {
-               ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-                                            SMU_MSG_DisableAllSmuFeatures));
-               if (ret)
-                       return ret;
-       }
-
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+                                    SMU_MSG_DisableAllSmuFeatures));
        if (ret)
                return ret;
 
-       bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-       bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                   feature->feature_num);
+       if (en) {
+               ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+               if (ret)
+                       return ret;
+
+               bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+                           feature->feature_num);
+               bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+                           feature->feature_num);
+       } else {
+               bitmap_zero(feature->enabled, feature->feature_num);
+               bitmap_zero(feature->supported, feature->feature_num);
+       }
 
        return ret;
 }
@@ -1617,7 +1647,9 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
        if (!baco_support)
                return false;
 
-       if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+       /* Arcturus does not support this bit mask */
+       if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+          !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
 
        val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -1643,6 +1675,10 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 {
 
        struct smu_baco_context *smu_baco = &smu->smu_baco;
+       struct amdgpu_device *adev = smu->adev;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       uint32_t bif_doorbell_intr_cntl;
+       uint32_t data;
        int ret = 0;
 
        if (smu_v11_0_baco_get_state(smu) == state)
@@ -1650,10 +1686,37 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 
        mutex_lock(&smu_baco->mutex);
 
-       if (state == SMU_BACO_STATE_ENTER)
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
-       else
+       bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
+       if (state == SMU_BACO_STATE_ENTER) {
+               bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+                                               BIF_DOORBELL_INT_CNTL,
+                                               DOORBELL_INTERRUPT_DISABLE, 1);
+               WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+               if (!ras || !ras->supported) {
+                       data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+                       data |= 0x80000000;
+                       WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+               } else {
+                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+               }
+       } else {
                ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+               if (ret)
+                       goto out;
+
+               bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+                                               BIF_DOORBELL_INT_CNTL,
+                                               DOORBELL_INTERRUPT_DISABLE, 0);
+               WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+               /* clear vbios scratch 6 and 7 for coming asic reinit */
+               WREG32(adev->bios_scratch_reg_offset + 6, 0);
+               WREG32(adev->bios_scratch_reg_offset + 7, 0);
+       }
        if (ret)
                goto out;
 
@@ -1663,13 +1726,17 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
        return ret;
 }
 
-int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_enter(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
-       if (ret)
-               return ret;
+       /* Arcturus does not need this audio workaround */
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+               if (ret)
+                       return ret;
+       }
 
        ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
        if (ret)
@@ -1677,6 +1744,13 @@ int smu_v11_0_baco_reset(struct smu_context *smu)
 
        msleep(10);
 
+       return ret;
+}
+
+int smu_v11_0_baco_exit(struct smu_context *smu)
+{
+       int ret = 0;
+
        ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
        if (ret)
                return ret;
@@ -1815,3 +1889,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
        }
        return ret;
 }
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+                                   enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+       uint32_t sclk_mask, mclk_mask, soc_mask;
+
+       switch (level) {
+       case AMD_DPM_FORCED_LEVEL_HIGH:
+               ret = smu_force_dpm_limit_value(smu, true);
+               break;
+       case AMD_DPM_FORCED_LEVEL_LOW:
+               ret = smu_force_dpm_limit_value(smu, false);
+               break;
+       case AMD_DPM_FORCED_LEVEL_AUTO:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               ret = smu_unforce_dpm_levels(smu);
+               break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               ret = smu_get_profiling_clk_mask(smu, level,
+                                                &sclk_mask,
+                                                &mclk_mask,
+                                                &soc_mask);
+               if (ret)
+                       return ret;
+               smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+               smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+               smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+               break;
+       case AMD_DPM_FORCED_LEVEL_MANUAL:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+       default:
+               break;
+       }
+       return ret;
+}
+