/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}
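
/*
 * The same X-macro technique is used below: redefining __SMU_DUMMY_MAP as a
 * stringifier expands the SMU_FEATURE_MASKS list into a table of feature
 * names parallel to enum smu_feature_mask.
 */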
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";

	return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				i,
				!!smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}
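
/*
 * Enable or disable the features named by @feature_mask. The 64-bit mask is
 * split into low/high 32-bit halves because the SMC takes a single 32-bit
 * parameter per message; on success the cached feature->enabled bitmap is
 * updated to match.
 */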
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
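
/*
 * Reconcile the currently enabled features against @new_mask: bits set in
 * @new_mask but not yet enabled are turned on, and bits currently enabled
 * but cleared in @new_mask are turned off.
 */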
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);

	return ret;
}
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		/* the message parameter packs the clock id and the limit in MHz */
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
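
/*
 * Query the min/max frequency of a clock domain. When DPM for that domain
 * is disabled, fall back to the bootup clock from the vbios, which is
 * reported in 10 kHz units (hence the division by 100 to get MHz).
 */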
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
	} else {
		/*
		 * Todo: have each ASIC (ASIC ppt funcs) control the callbacks
		 * exposed to the core driver, and add helpers for the parts
		 * that are common (SMU v11.x / SMU v12.x funcs).
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM
	 * not supported for now, so mask it off */
	*value = param & 0x7fffffff;

	return ret;
}
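
/*
 * Level 0xff is a special argument to GetDpmFreqByIndex: the SMU firmware
 * replies with the number of DPM levels for the clock domain instead of the
 * frequency of a specific level.
 */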
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu context
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    where race-free operation is guaranteed by the caller.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level, in which case smu->mutex is already
 *    held by the parent API smu_force_performance_level of the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
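
/*
 * Transfer a driver table to or from SMU-private memory. The table's DRAM
 * address is handed to the SMC via SetDriverDramAddrHigh/Low, then a
 * TransferTableDram2Smu or TransferTableSmu2Dram message moves the data in
 * the direction selected by @drv2smu.
 */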
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = NULL;
	int ret = 0;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table = &smu_table->tables[table_index];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}
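
/*
 * Vega20 uses the legacy powerplay path by default; the amdgpu_dpm=2 module
 * parameter opts it into this SW SMU implementation. ASICs from Arcturus
 * onward (which includes Renoir and Navi1x in the asic_type enum) always
 * use SW SMU.
 */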
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_ARCTURUS)
		return true;
	else
		return false;
}

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (on Navi1x, the DPM disablement is
	 * skipped) may be needed when uploading a custom pptable.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (adev->flags & AMD_IS_APU)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize the pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate
	 * smu_dpm_context to fill in the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	/*
	 * workload_prority[] is zero-initialized, so BOOTUP_DEFAULT resolves
	 * to priority 0 here, before the priorities are assigned below.
	 */
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret, i;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	/* unwind the BOs allocated so far */
	while (--i >= 0) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t i = 0;

	if (!tables)
		return 0;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}
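
/*
 * Bring the SMC tables up on the hardware. @initialize is true on first
 * init (hw_init) and false on resume: the vbios parsing, VRAM BO
 * allocation and pptable parsing steps only need to run once.
 */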
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill PPTable_t smc_pptable in
		 * the smu_table_context structure. Then read the smc_dpm_table
		 * from vbios and fill it into smc_pptable as well.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value
		 * matches DRIVER_IF_VERSION of the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/* smu_dump_pptable(smu); */

	/*
	 * Copy the pptable bo in vram to the smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_notify_display_change(smu);
		if (ret)
			return ret;

		/*
		 * Set min deep sleep dce fclk with the bootup value from vbios
		 * via the SetMinDeepSleepDcefclk MSG.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);
		if (ret)
			return ret;
	}

	/*
	 * Set initialized values (from vbios) into the dpm tables context,
	 * e.g. gfxclk, memclk, dcefclk, and enable the DPM feature for each
	 * dpm/clk in smu_dpm_context.
	 */
	if (initialize) {
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG
	 * for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * The pool is reserved for SMC use; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the SMC of its address.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
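
/*
 * Loading the SMU microcode directly is only needed when the PSP does not
 * handle firmware loading, and only on ASICs before Navi10. In all cases,
 * verify that the firmware is up via check_fw_status.
 */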
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret)
			pr_err("SMC is not ready\n");
	}

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}
static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
	}

	ret = smu_stop_thermal_control(smu);
	if (ret) {
		pr_warn("Failed to stop thermal control!\n");
		return ret;
	}

	/*
	 * When uploading a custom pptable, skip disabling the DPM features
	 * on Navi1x ASICs:
	 *   - The gfx related features are under RLC control on those ASICs,
	 *     and reenabling them would require an RLC reinitialization,
	 *     which costs much more effort.
	 *
	 *   - The SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (!smu->uploading_custom_pp_table ||
	    !((adev->asic_type >= CHIP_NAVI10) &&
	      (adev->asic_type <= CHIP_NAVI12))) {
		ret = smu_stop_dpms(smu);
		if (ret) {
			pr_warn("Failed to stop DPMs!\n");
			return ret;
		}
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}
static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = false;

	if (!(adev->flags & AMD_IS_APU))
		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (adev->in_gpu_reset && baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("set BACO feature enabled failed, return %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		goto failed;
	}

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	return ret;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);

	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save the current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore the level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	return ret;
}
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!\n");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!\n");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			ret = smu_default_set_performance_level(smu, level);
			if (ret) {
				pr_err("Failed to set performance level!\n");
				return ret;
			}
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
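
/*
 * Workload profiles are arbitrated through workload_mask: each profile
 * occupies the bit at its priority, fls() picks the highest-priority bit
 * still set, and that index into workload_setting[] selects the winning
 * profile.
 */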
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask,
			 bool lock_needed)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	/*
	 * The SMC may not be fully ready. That can be expected
	 * as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg);
	if (ret)
		pr_err("[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	/*
	 * The SMC may not be fully ready. That can be expected
	 * as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		pr_err("[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	table = &smu_table->tables[SMU_TABLE_WATERMARKS];

	if (!table->cpu_addr)
		return -EINVAL;

	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
			       true);

	return ret;
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	int ret = 0;
	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
	void *table = watermarks->cpu_addr;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
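
/*
 * Most of the helpers below follow a single pattern: take smu->mutex,
 * dispatch to the ASIC-specific ppt_funcs callback if one is installed,
 * drop the lock and return the result (or a do-nothing default when the
 * callback is absent).
 */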
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool def,
			bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_reset)
		ret = smu->ppt_funcs->baco_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}
uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
	uint32_t ret = 0;

	if (smu->ppt_funcs->get_pptable_power_limit)
		ret = smu->ppt_funcs->get_pptable_power_limit(smu);

	return ret;
}

int smu_send_smc_msg(struct smu_context *smu,
		     enum smu_message_type msg)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu, msg, 0);

	return ret;
}