drm/amd/powerplay: unified VRAM address for driver table interaction with SMU V2
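The change routes driver/SMU table traffic through a single driver table buffer in VRAM; its MC address is handed to the firmware via the SMU_MSG_SetDriverDramAddrHigh/Low message pair (see the new smu_v11_0_set_driver_table_location() below). As a rough sketch only -- the real call sites live in the common SMU code and the ASIC backends, not in this file, and the wrapper name here is made up -- a hardware-init path could program both the driver and tool table addresses like this:

    /*
     * Sketch only, assuming the surrounding init code is wired up elsewhere;
     * example_program_table_addresses() is a hypothetical wrapper, not part
     * of this patch.
     */
    static int example_program_table_addresses(struct smu_context *smu)
    {
            int ret;

            /* Unified driver table: the helper splits the 64-bit MC address
             * into the SetDriverDramAddrHigh/Low message pair.
             */
            ret = smu_v11_0_set_driver_table_location(smu);
            if (ret)
                    return ret;

            /* The tool table is programmed with the same high/low scheme. */
            return smu_v11_0_set_tool_table_location(smu);
    }
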
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index eae3657cf1f386921b4f21c3def5588fac8b4f07..e804f98540278fbf5f789133994a51c380490857 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -80,15 +80,13 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-                       break;
+                       return cur_value == 0x1 ? 0 : -EIO;
+
                udelay(1);
        }
 
        /* timeout means wrong logic */
-       if (i == timeout)
-               return -ETIME;
-
-       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+       return -ETIME;
 }
 
 int
@@ -104,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
                return index;
 
        ret = smu_v11_0_wait_for_response(smu);
-       if (ret)
-               pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-                      smu_get_message_name(smu, msg), index, param, ret);
+       if (ret) {
+               pr_err("Msg issuing pre-check failed and "
+                      "SMU may be not in the right state!\n");
+               return ret;
+       }
 
        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
@@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 
        kfree(smu_table->tables);
        kfree(smu_table->metrics_table);
+       kfree(smu_table->watermarks_table);
        smu_table->tables = NULL;
        smu_table->metrics_table = NULL;
+       smu_table->watermarks_table = NULL;
        smu_table->metrics_time = 0;
 
        ret = smu_v11_0_fini_dpm_context(smu);
@@ -774,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
        return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
 }
 
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+       struct smu_table *driver_table = &smu->smu_table.driver_table;
+       int ret = 0;
+
+       if (driver_table->mc_address) {
+               ret = smu_send_smc_msg_with_param(smu,
+                               SMU_MSG_SetDriverDramAddrHigh,
+                               upper_32_bits(driver_table->mc_address));
+               if (!ret)
+                       ret = smu_send_smc_msg_with_param(smu,
+                               SMU_MSG_SetDriverDramAddrLow,
+                               lower_32_bits(driver_table->mc_address));
+       }
+
+       return ret;
+}
+
 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 {
        int ret = 0;
@@ -835,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
                                      uint32_t *feature_mask, uint32_t num)
 {
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
+       struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 
        if (!feature_mask || num < 2)
                return -EINVAL;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-       if (ret)
-               return ret;
-       ret = smu_read_smc_arg(smu, &feature_mask_high);
-       if (ret)
-               return ret;
+       if (bitmap_empty(feature->enabled, feature->feature_num)) {
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+               if (ret)
+                       return ret;
+               ret = smu_read_smc_arg(smu, &feature_mask_high);
+               if (ret)
+                       return ret;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-       if (ret)
-               return ret;
-       ret = smu_read_smc_arg(smu, &feature_mask_low);
-       if (ret)
-               return ret;
+               ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+               if (ret)
+                       return ret;
+               ret = smu_read_smc_arg(smu, &feature_mask_low);
+               if (ret)
+                       return ret;
 
-       feature_mask[0] = feature_mask_low;
-       feature_mask[1] = feature_mask_high;
+               feature_mask[0] = feature_mask_low;
+               feature_mask[1] = feature_mask_high;
+       } else {
+               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+                            feature->feature_num);
+       }
 
        return ret;
 }
@@ -867,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
        uint32_t feature_mask[2];
        int ret = 0;
 
-       if (smu->pm_enabled) {
-               ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-                                            SMU_MSG_DisableAllSmuFeatures));
-               if (ret)
-                       return ret;
-       }
-
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+                                    SMU_MSG_DisableAllSmuFeatures));
        if (ret)
                return ret;
 
-       bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-       bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                   feature->feature_num);
+       if (en) {
+               ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+               if (ret)
+                       return ret;
+
+               bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+                           feature->feature_num);
+               bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+                           feature->feature_num);
+       } else {
+               bitmap_zero(feature->enabled, feature->feature_num);
+               bitmap_zero(feature->supported, feature->feature_num);
+       }
 
        return ret;
 }
@@ -1676,10 +1705,17 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                }
        } else {
                ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+               if (ret)
+                       goto out;
+
                bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
                                                BIF_DOORBELL_INT_CNTL,
                                                DOORBELL_INTERRUPT_DISABLE, 0);
                WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+               /* clear vbios scratch 6 and 7 for coming asic reinit */
+               WREG32(adev->bios_scratch_reg_offset + 6, 0);
+               WREG32(adev->bios_scratch_reg_offset + 7, 0);
        }
        if (ret)
                goto out;
@@ -1853,3 +1889,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
        }
        return ret;
 }
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+                                   enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+       uint32_t sclk_mask, mclk_mask, soc_mask;
+
+       switch (level) {
+       case AMD_DPM_FORCED_LEVEL_HIGH:
+               ret = smu_force_dpm_limit_value(smu, true);
+               break;
+       case AMD_DPM_FORCED_LEVEL_LOW:
+               ret = smu_force_dpm_limit_value(smu, false);
+               break;
+       case AMD_DPM_FORCED_LEVEL_AUTO:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               ret = smu_unforce_dpm_levels(smu);
+               break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               ret = smu_get_profiling_clk_mask(smu, level,
+                                                &sclk_mask,
+                                                &mclk_mask,
+                                                &soc_mask);
+               if (ret)
+                       return ret;
+               smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+               smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+               smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+               break;
+       case AMD_DPM_FORCED_LEVEL_MANUAL:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+       default:
+               break;
+       }
+       return ret;
+}
+
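
For reference, the new smu_v11_0_set_performance_level() handler is common SMU v11 code; an ASIC backend could delegate the generic DPM levels to it and only special-case what it needs. The sketch below is an illustration of that pattern, not part of this diff, and example_asic_force_peak_clocks() is a hypothetical helper:

    /*
     * Hypothetical backend hook; only smu_v11_0_set_performance_level() and
     * the level enum come from the patch above.
     */
    static int example_asic_set_performance_level(struct smu_context *smu,
                                                  enum amd_dpm_forced_level level)
    {
            switch (level) {
            case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                    /* An ASIC may want its own notion of peak clocks. */
                    return example_asic_force_peak_clocks(smu);
            default:
                    /* Everything else can use the common handler added above. */
                    return smu_v11_0_set_performance_level(smu, level);
            }
    }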