/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char *__smu_message_names[] = {

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
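/*
 * Illustrative note: __SMU_DUMMY_MAP is redefined to the stringizing form
 * #type before the message-type x-macro list is expanded inside the array
 * initializer above, so every enum entry becomes its own name string. For
 * example, an entry __SMU_DUMMY_MAP(GetSmuVersion) expands to the string
 * "GetSmuVersion", which smu_get_message_name() then returns for
 * SMU_MSG_GetSmuVersion.
 */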
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
			feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				smu_get_feature_name(smu, sort_feature[i]),
				smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");

	mutex_unlock(&smu->mutex);
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;

	if (!smu->pm_enabled)

	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;
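	/*
	 * Worked example (illustrative): for feature_mask
	 * 0x0000000300000001ULL, feature_low ends up 0x00000001 and
	 * feature_high 0x00000003, which are then sent as the Low/High
	 * message parameters below.
	 */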
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,

	mutex_lock(&feature->mutex);
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;
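	/*
	 * Worked example (illustrative): if feature_enables is 0b1010 and
	 * the requested new_mask is 0b0110, then feature_2_enabled =
	 * ~0b1010 & 0b0110 = 0b0100 (bits to turn on) and
	 * feature_2_disabled = 0b1010 & ~0b0110 = 0b1000 (bits to turn off).
	 */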
	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);

	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);

	mutex_unlock(&smu->mutex);

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
	if (!if_version && !smu_version)

	ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
	ret = smu_read_smc_arg(smu, if_version);

	ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
	ret = smu_read_smc_arg(smu, smu_version);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
	if (!min && !max)

	if (!smu_clk_dpm_is_enabled(smu, clk_type))

	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
	int ret = 0, clk_id = 0;

	if (!min && !max)

	if (!smu_clk_dpm_is_enabled(smu, clk_type))

	clk_id = smu_clk_get_index(smu, clk_type);

		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,

		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
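	/*
	 * Worked example (illustrative): for clk_id 2 and max 1600 (MHz),
	 * the message parameter is (2 << 16) | (1600 & 0xffff) = 0x00020640;
	 * the same clock-id/frequency packing is used for the hard-min
	 * message above.
	 */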
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
	uint32_t clock_limit;

		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
			clock_limit = smu->smu_table.boot_values.uclk;
			clock_limit = smu->smu_table.boot_values.gfxclk;
			clock_limit = smu->smu_table.boot_values.socclk;

		/* clock in MHz units */
			*min = clock_limit / 100;
			*max = clock_limit / 100;

	/*
	 * TODO: have each ASIC (ASIC_ppt funcs) control the callbacks exposed
	 * to the core driver, and add helpers for the functionality that is
	 * common across generations (smu_v11_x | smu_v12_x funcs).
	 */
	ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);

		mutex_unlock(&smu->mutex);
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
	int ret = 0, clk_id = 0;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))

	clk_id = smu_clk_get_index(smu, clk_type);

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,

	ret = smu_read_smc_arg(smu, &param);

	/*
	 * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
	 * Discrete DPM is not supported for now, so mask the bit off.
	 */
	*value = param & 0x7fffffff;
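	/*
	 * Worked example (illustrative): a returned value of 0x80000320
	 * means discrete DPM (BIT31 set) at 800 MHz; masking with
	 * 0x7fffffff keeps only the 800 MHz frequency.
	 */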
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
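/*
 * Note (illustrative, inferred from the call above): passing index 0xff to
 * GetDpmFreqByIndex asks the firmware for the number of DPM levels of the
 * clock rather than the frequency of one particular level.
 */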
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
	enum smu_feature_mask feature_id = 0;

		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;

	if (!smu_feature_is_enabled(smu, feature_id)) {
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
	mutex_lock(&smu->mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);

	mutex_unlock(&smu->mutex);
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
	/* power states are not supported; report only the default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;

	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = NULL;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)

	table = &smu_table->tables[table_index];

		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
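	/*
	 * Worked example (illustrative): transferring table_id 3 with
	 * argument 1 yields the parameter 3 | (1 << 16) = 0x00010003; the
	 * low 16 bits select the table and the high 16 bits carry the
	 * caller's argument.
	 */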
	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

		memcpy(table_data, table->cpu_addr, table->size);
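/*
 * Note (illustrative): on Vega20 the SW SMU path below is opt-in via the
 * amdgpu_dpm=2 module parameter; ASICs from Arcturus onward use it
 * unconditionally.
 */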
bool is_support_sw_smu(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_ARCTURUS)

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_VEGA20)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;

	if (!smu->pm_enabled)
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	ret = smu_reset(smu);
		pr_err("smu reset failed, ret = %d\n", ret);

	mutex_unlock(&smu->mutex);
int smu_feature_init_dpm(struct smu_context *smu)
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature *feature = &smu->smu_feature;

	if (adev->flags & AMD_IS_APU)

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
	struct smu_feature *feature = &smu->smu_feature;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	return smu_feature_update_enable_state(smu,

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
	struct smu_feature *feature = &smu->smu_feature;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
	struct smu_feature *feature = &smu->smu_feature;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
		test_and_set_bit(feature_id, feature->supported);
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);
static int smu_set_funcs(struct amdgpu_device *adev)
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v12_0_set_smu_funcs(smu);

static int smu_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->pm_enabled = !!amdgpu_dpm;

	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
static int smu_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
	struct amdgpu_device *adev = smu->adev;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
static int smu_initialize_pptable(struct smu_context *smu)

static int smu_smc_table_sw_init(struct smu_context *smu)
	ret = smu_initialize_pptable(smu);
		pr_err("Failed to initialize pptable!\n");

	/*
	 * Create the smu_table structure, and init the smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
		pr_err("Failed to init smc tables!\n");

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and the context buffers that back the
	 * smu_power_context data.
	 */
	ret = smu_init_power(smu);
		pr_err("Failed to init smu power!\n");

static int smu_smc_table_sw_fini(struct smu_context *smu)
	ret = smu_fini_smc_tables(smu);
		pr_err("Failed to fini smc tables!\n");
static int smu_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
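	/*
	 * Worked example (illustrative): if only the VIDEO profile
	 * (priority 3) is requested, workload_mask is 0b1000; fls(0b1000)
	 * = 4, so index 3 selects workload_setting[3] =
	 * PP_SMC_POWER_PROFILE_VIDEO when the active profile is picked in
	 * smu_adjust_power_state_dynamic().
	 */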
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
		pr_err("Failed to load smu firmware!\n");

	ret = smu_smc_table_sw_init(smu);
		pr_err("Failed to sw init smc table!\n");

	ret = smu_register_irq_handler(smu);
		pr_err("Failed to register smc irq handler!\n");
static int smu_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
		pr_err("Failed to sw fini smc table!\n");

	ret = smu_fini_power(smu);
		pr_err("Failed to fini smu power!\n");
static int smu_init_fb_allocations(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);

		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);

static int smu_fini_fb_allocations(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
static int smu_smc_table_hw_init(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);

		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);

		ret = smu_setup_pptable(smu);

		ret = smu_get_clk_info_from_vbios(smu);

		/*
		 * Check that the format_revision in the vbios is compatible
		 * with the pptable header version, and that the structure
		 * size is not 0.
		 */
		ret = smu_check_pptable(smu);

		/*
		 * Allocate vram bos to store the smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);

		/*
		 * Parse the pptable format and fill the PPTable_t
		 * smc_pptable in the smu_table_context structure; then read
		 * the smc_dpm_table from the vbios and merge it into
		 * smc_pptable.
		 */
		ret = smu_parse_pptable(smu);

		/*
		 * Send the GetDriverIfVersion message and check that the
		 * returned value matches the DRIVER_IF_VERSION in the smc
		 * header.
		 */
		ret = smu_check_fw_version(smu);

	/* smu_dump_pptable(smu); */

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);

	ret = smu_feature_set_allowed_mask(smu);

	ret = smu_system_features_control(smu, true);

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);

		ret = smu_notify_display_change(smu);

		/*
		 * Set the min deep sleep dcefclk to the bootup value from
		 * the vbios via the SetMinDeepSleepDcefclk message.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);

	/*
	 * Copy the initialized values (from the vbios) into the dpm tables
	 * context (gfxclk, memclk, dcefclk, etc.), and enable the DPM
	 * feature for each of them.
	 */
	ret = smu_populate_smc_tables(smu);

	ret = smu_init_max_sustainable_clocks(smu);

	ret = smu_set_default_od_settings(smu, initialize);

	ret = smu_populate_umd_state_clk(smu);

	ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message, for use by tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu context pointer
 *
 * This memory pool will be used for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->domain,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);

static int smu_free_memory_pool(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));
static int smu_start_smc_engine(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->funcs->load_microcode) {
				ret = smu->funcs->load_microcode(smu);

	if (smu->funcs->check_fw_status) {
		ret = smu->funcs->check_fw_status(smu);
			pr_err("SMC is not ready\n");
static int smu_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
		pr_err("SMU is not ready yet!\n");

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);

	if (!smu->pm_enabled)

	ret = smu_feature_init_dpm(smu);

	ret = smu_smc_table_hw_init(smu, true);

	ret = smu_alloc_memory_pool(smu);

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);

	ret = smu_start_thermal_control(smu);

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: set the dpm_enabled flag only once VCN and DAL DPM are functional */

	pr_info("SMU is initialized successfully!\n");
static int smu_stop_dpms(struct smu_context *smu)
	return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);

static int smu_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);

	ret = smu_stop_thermal_control(smu);
		pr_warn("Failed to stop thermal control!\n");

	ret = smu_stop_dpms(smu);
		pr_warn("Failed to stop DPMs!\n");

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);

	ret = smu_free_memory_pool(smu);
int smu_reset(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;

	ret = smu_hw_fini(adev);

	ret = smu_hw_init(adev);

static int smu_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = false;

	if (!(adev->flags & AMD_IS_APU))
		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
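	/*
	 * Note (illustrative): for a BACO-based GPU reset the BACO feature
	 * must stay enabled so the ASIC can enter and exit BACO during the
	 * reset, so it is re-enabled below after the blanket feature
	 * disable above.
	 */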
	if (adev->in_gpu_reset && baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
			pr_warn("set BACO feature enabled failed, return %d\n", ret);

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
static int smu_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
		pr_err("SMU is not ready yet!\n");

	ret = smu_smc_table_hw_init(smu, false);

	ret = smu_start_thermal_control(smu);

	smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	pr_info("SMU is resumed successfully!\n");
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))

	if (!display_config)

	mutex_lock(&smu->mutex);

	if (smu->funcs->set_deep_sleep_dcefclk)
		smu->funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
	struct smu_performance_level level = {0};

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;

	if (!is_support_sw_smu(smu->adev))

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

		pr_err("Error in smu_get_clock_info\n");

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	mutex_unlock(&smu->mutex);
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);

		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
	uint32_t sclk_mask, mclk_mask, soc_mask;

	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
			pr_err("Failed to change display config!\n");

	ret = smu_apply_clocks_adjust_rules(smu);
		pr_err("Failed to apply clocks adjust rules!\n");

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
			pr_err("Failed to notify smc display config!\n");

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
			ret = smu_default_set_performance_level(smu, level);
			pr_err("Failed to set performance level!\n");

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
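		/*
		 * Illustrative: fls() returns the position of the most
		 * significant set bit, 1-based; e.g. fls(0b0101) = 3, so
		 * index 2 picks the highest-priority profile currently
		 * requested in workload_mask.
		 */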
		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
	mutex_lock(&smu->mutex);

	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		ret = smu_set_cpu_power_state(smu);
		ret = smu_adjust_power_state_dynamic(smu, level, false);
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);

	mutex_unlock(&smu->mutex);
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)

	mutex_lock(&smu->mutex);

		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)

	mutex_lock(&smu->mutex);
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&smu->mutex);

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
		mutex_unlock(&smu->mutex);

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,

	mutex_unlock(&smu->mutex);

int smu_set_display_count(struct smu_context *smu, uint32_t count)
	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
	case PP_MP1_STATE_NONE:
		mutex_unlock(&smu->mutex);

	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);

	ret = smu_send_smc_msg(smu, msg);
		pr_err("[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
		pr_err("[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);
int smu_write_watermarks_table(struct smu_context *smu)
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	table = &smu_table->tables[SMU_TABLE_WATERMARKS];

	if (!table->cpu_addr)

	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
	void *table = watermarks->cpu_addr;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;

	mutex_unlock(&smu->mutex);
const struct amd_ip_funcs smu_ip_funcs = {
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &smu_ip_funcs,

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &smu_ip_funcs,
int smu_load_microcode(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->load_microcode)
		ret = smu->funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);
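/*
 * Note (illustrative): the exported wrappers below all follow the same
 * pattern: take smu->mutex, call the optional funcs/ppt_funcs callback
 * when the ASIC provides one, and release the lock, so the callbacks are
 * serialized against each other.
 */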
int smu_check_fw_status(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->check_fw_status)
		ret = smu->funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_gfx_cgpg)
		ret = smu->funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_fan_speed_rpm)
		ret = smu->funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

int smu_get_power_limit(struct smu_context *smu,
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

	mutex_unlock(&smu->mutex);

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_power_limit)
		ret = smu->funcs->set_power_limit(smu, limit);

	mutex_unlock(&smu->mutex);

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

	mutex_unlock(&smu->mutex);

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

int smu_set_power_profile_mode(struct smu_context *smu,
			       uint32_t param_size,
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	mutex_unlock(&smu->mutex);

int smu_get_fan_control_mode(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->get_fan_control_mode)
		ret = smu->funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

int smu_set_fan_control_mode(struct smu_context *smu, int value)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_fan_control_mode)
		ret = smu->funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_fan_speed_percent)
		ret = smu->funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_deep_sleep_dcefclk)
		ret = smu->funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_active_display_count)
		ret = smu->funcs->set_active_display_count(smu, count);

	mutex_unlock(&smu->mutex);

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
	mutex_lock(&smu->mutex);

	if (smu->funcs->get_clock_by_type)
		ret = smu->funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
	mutex_lock(&smu->mutex);

	if (smu->funcs->get_max_high_clocks)
		ret = smu->funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
	mutex_lock(&smu->mutex);

	if (smu->funcs->display_clock_voltage_request)
		ret = smu->funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

int smu_notify_smu_enable_pwe(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->notify_smu_enable_pwe)
		ret = smu->funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

int smu_set_xgmi_pstate(struct smu_context *smu,
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_xgmi_pstate)
		ret = smu->funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

int smu_set_azalia_d3_pme(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->set_azalia_d3_pme)
		ret = smu->funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

bool smu_baco_is_support(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->baco_is_support)
		ret = smu->funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
	if (smu->funcs->baco_get_state)

	mutex_lock(&smu->mutex);
	*state = smu->funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

int smu_baco_reset(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->baco_reset)
		ret = smu->funcs->baco_reset(smu);

	mutex_unlock(&smu->mutex);

int smu_mode2_reset(struct smu_context *smu)
	mutex_lock(&smu->mutex);

	if (smu->funcs->mode2_reset)
		ret = smu->funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
	mutex_lock(&smu->mutex);

	if (smu->funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);