/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"

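/*
 * SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS below are X-macro lists (from
 * amdgpu_smu.h) that expand __SMU_DUMMY_MAP() once per entry.  Redefining
 * __SMU_DUMMY_MAP() to stringify its argument turns each list into a table
 * of human-readable names indexed by the corresponding enum value.
 */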
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char* __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char* __smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

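/*
 * Write a human-readable dump of the enabled SMU features into @buf: first
 * the raw 64-bit mask as high/low 32-bit words, then one line per hardware
 * feature with its name, index, and enabled/disabled state.  Returns the
 * number of bytes written, as expected of a sysfs show handler.
 */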
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				i,
				!!smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

failed:
	return size;
}

static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0 ) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

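/*
 * Only the bits that actually change state are sent to the firmware:
 * feature_2_enabled holds the bits set in new_mask but not currently
 * enabled, feature_2_disabled the reverse.  E.g. with a current mask of
 * 0b1010 and a new_mask of 0b0110, bit 2 gets enabled, bit 3 gets
 * disabled, and bit 1 (already on) is left untouched.
 */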
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

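/*
 * The clock-limit messages below pack their payload into a single 32-bit
 * argument: the clock id in the upper 16 bits and the frequency (in MHz,
 * hence the & 0xffff truncation) in the lower 16 bits.
 */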
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	/*
	 * TODO: Have each ASIC (ASIC ppt funcs) control the callbacks exposed
	 * to the core driver, and add helpers for the parts that are common
	 * (smu_v11_x | smu_v12_x funcs).
	 */
	ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);

	return ret;
}

int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/* BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
	 * Discrete DPM is not supported yet, so mask the bit off. */
	*value = param & 0x7fffffff;

	return ret;
}

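/*
 * Passing 0xff as the level index is a firmware convention: instead of a
 * level's frequency, GetDpmFreqByIndex then returns the number of DPM
 * levels for the given clock, which is what the helper below relies on.
 */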
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

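/*
 * smu_update_table() moves one SMC table between driver memory and the
 * SMU: the table's DMA address is first programmed with the
 * SetDriverDramAddrHigh/Low message pair, then the firmware copies the
 * table with TransferTableDram2Smu (drv2smu = true) or
 * TransferTableSmu2Dram.  The message argument packs the table id in the
 * low 16 bits and the caller-supplied argument in the high 16 bits.
 */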
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = NULL;
	int ret = 0;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table = &smu_table->tables[table_index];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return (amdgpu_dpm == 2) ? true : false;
	else if (adev->asic_type >= CHIP_ARCTURUS)
		return true;
	else
		return false;
}

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (amdgpu_dpm != 1)
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (adev->flags & AMD_IS_APU)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_ARCTURUS:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	case CHIP_RENOIR:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v12_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init the smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill in the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret, i;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	while (--i >= 0) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t i = 0;

	if (!tables)
		return 0;

	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

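/*
 * OverridePcieParameters takes a single argument laid out as described in
 * the comment inside the function: LCLK DPM level in bits 31:16, PCIe
 * generation in bits 15:8, and the lane-width code in bits 7:0.  For a
 * Gen4 x16 link this works out to (1 << 16) | (3 << 8) | 6 = 0x10306.
 */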
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->flags & AMD_IS_APU)
		return 0;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

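/*
 * One-time (initialize = true) and resume-time hardware setup of the SMC
 * tables: pull the boot values and pptable from the vbios, allocate and
 * upload the tables, enable the allowed features, then apply the display,
 * PCIe, and deep-sleep settings.  On resume the vbios parsing and buffer
 * allocation steps are skipped.
 */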
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the vbios format_revision matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/* allocate vram bos to store smc table contents */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable and fill the PPTable_t smc_pptable in the
		 * smu_table_context structure; then read the smc_dpm_table
		 * from the vbios and merge it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send GetDriverIfVersion and check that the returned value
		 * matches the DRIVER_IF_VERSION in the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/* smu_dump_pptable(smu); */

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;

		ret = smu_notify_display_change(smu);
		if (ret)
			return ret;

		/*
		 * Set the min deep sleep dcefclk to the bootup value from the
		 * vbios via the SetMinDeepSleepDcefclk message.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);
		if (ret)
			return ret;
	}

	/*
	 * Fill the dpm tables context with the initial values read from the
	 * vbios (gfxclk, memclk, dcefclk, etc.) and enable the DPM feature
	 * for each dpm table.
	 */
	if (initialize) {
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
		if (ret)
			return ret;
	}

	/* set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG for tools */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the firmware of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			ret = smu_load_microcode(smu);
			if (ret)
				return ret;
		}
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the firmware of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: only set the dpm_enabled flag once VCN and DAL DPM are workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}

static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (adev->flags & AMD_IS_APU) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
	}

	ret = smu_stop_thermal_control(smu);
	if (ret) {
		pr_warn("Failed to stop thermal control!\n");
		return ret;
	}

	ret = smu_stop_dpms(smu);
	if (ret) {
		pr_warn("Failed to stop DPMs!\n");
		return ret;
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (adev->in_gpu_reset && baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("Failed to enable the BACO feature, ret = %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

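/*
 * UMD pstate bookkeeping: entering any profiling level saves the current
 * dpm level and ungates GFX clock/power gating (presumably so profiling
 * runs at stable clocks); leaving the profiling levels restores the saved
 * level and re-enables gating.
 */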
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return ret;
}

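/*
 * Apply display settings (unless skipped), re-evaluate the clock
 * adjustment rules, switch to the requested dpm level, and finally pick
 * the active power profile: the highest set bit in workload_mask wins, so
 * the highest-priority requested profile is the one applied.
 */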
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!\n");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!\n");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			ret = smu_default_set_performance_level(smu, level);
			if (ret) {
				pr_err("Failed to set performance level!\n");
				return ret;
			}
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

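/*
 * Each power profile owns one bit of workload_mask, positioned by its
 * priority in workload_prority[].  fls() then yields the highest-priority
 * profile still requested; e.g. with bits 0 and 6 set, fls() returns 7
 * and workload_setting[6] is selected.
 */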
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0);

	mutex_unlock(&smu->mutex);

	return 0;
}

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};