2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
26 #include "amdgpu_smu.h"
27 #include "atomfirmware.h"
28 #include "amdgpu_atomfirmware.h"
29 #include "smu_v11_0.h"
30 #include "smu11_driver_if.h"
31 #include "soc15_common.h"
33 #include "vega20_ppt.h"
34 #include "pp_thermal.h"
36 #include "asic_reg/thm/thm_11_0_2_offset.h"
37 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
38 #include "asic_reg/mp/mp_9_0_offset.h"
39 #include "asic_reg/mp/mp_9_0_sh_mask.h"
40 #include "asic_reg/nbio/nbio_7_4_offset.h"
41 #include "asic_reg/smuio/smuio_9_0_offset.h"
42 #include "asic_reg/smuio/smuio_9_0_sh_mask.h"
44 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
46 #define SMU11_TOOL_SIZE 0x19000
47 #define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
48 #define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
50 #define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
51 #define SMU11_VOLTAGE_SCALE 4
53 #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
54 FEATURE_DPM_GFXCLK_MASK | \
55 FEATURE_DPM_UCLK_MASK | \
56 FEATURE_DPM_SOCCLK_MASK | \
57 FEATURE_DPM_UVD_MASK | \
58 FEATURE_DPM_VCE_MASK | \
59 FEATURE_DPM_MP0CLK_MASK | \
60 FEATURE_DPM_LINK_MASK | \
61 FEATURE_DPM_DCEFCLK_MASK)
63 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
66 struct amdgpu_device *adev = smu->adev;
67 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
71 static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
73 struct amdgpu_device *adev = smu->adev;
75 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
79 static int smu_v11_0_wait_for_response(struct smu_context *smu)
81 struct amdgpu_device *adev = smu->adev;
82 uint32_t cur_value, i;
84 for (i = 0; i < adev->usec_timeout; i++) {
85 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
86 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
91 /* timeout means wrong logic */
92 if (i == adev->usec_timeout)
95 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
98 static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
100 struct amdgpu_device *adev = smu->adev;
101 int ret = 0, index = 0;
103 index = smu_msg_get_index(smu, msg);
107 smu_v11_0_wait_for_response(smu);
109 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
111 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
113 ret = smu_v11_0_wait_for_response(smu);
116 pr_err("Failed to send message 0x%x, response 0x%x\n", index,
124 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
128 struct amdgpu_device *adev = smu->adev;
129 int ret = 0, index = 0;
131 index = smu_msg_get_index(smu, msg);
135 ret = smu_v11_0_wait_for_response(smu);
137 pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
140 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
142 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
144 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
146 ret = smu_v11_0_wait_for_response(smu);
148 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
154 static int smu_v11_0_init_microcode(struct smu_context *smu)
156 struct amdgpu_device *adev = smu->adev;
157 const char *chip_name;
160 const struct smc_firmware_header_v1_0 *hdr;
161 const struct common_firmware_header *header;
162 struct amdgpu_firmware_info *ucode = NULL;
164 switch (adev->asic_type) {
166 chip_name = "vega20";
172 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
174 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
177 err = amdgpu_ucode_validate(adev->pm.fw);
181 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
182 amdgpu_ucode_print_smc_hdr(&hdr->header);
183 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
185 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
186 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
187 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
188 ucode->fw = adev->pm.fw;
189 header = (const struct common_firmware_header *)ucode->fw->data;
190 adev->firmware.fw_size +=
191 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
196 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
198 release_firmware(adev->pm.fw);
204 static int smu_v11_0_load_microcode(struct smu_context *smu)
209 static int smu_v11_0_check_fw_status(struct smu_context *smu)
211 struct amdgpu_device *adev = smu->adev;
212 uint32_t mp1_fw_flags;
214 mp1_fw_flags = RREG32_PCIE(MP1_Public |
215 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
217 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
218 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
224 static int smu_v11_0_check_fw_version(struct smu_context *smu)
226 uint32_t if_version = 0xff, smu_version = 0xff;
228 uint8_t smu_minor, smu_debug;
231 ret = smu_get_smc_version(smu, &if_version, &smu_version);
235 smu_major = (smu_version >> 16) & 0xffff;
236 smu_minor = (smu_version >> 8) & 0xff;
237 smu_debug = (smu_version >> 0) & 0xff;
239 pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n",
240 if_version, smu_version, smu_major, smu_minor, smu_debug);
242 if (if_version != smu->smc_if_version) {
243 pr_err("SMU driver if version not matched\n");
250 static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
257 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
260 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
265 if (!smu->smu_table.power_play_table)
266 smu->smu_table.power_play_table = table;
267 if (!smu->smu_table.power_play_table_size)
268 smu->smu_table.power_play_table_size = size;
273 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
275 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
277 if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
280 return smu_alloc_dpm_context(smu);
283 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
285 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
287 if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
290 kfree(smu_dpm->dpm_context);
291 kfree(smu_dpm->golden_dpm_context);
292 kfree(smu_dpm->dpm_current_power_state);
293 kfree(smu_dpm->dpm_request_power_state);
294 smu_dpm->dpm_context = NULL;
295 smu_dpm->golden_dpm_context = NULL;
296 smu_dpm->dpm_context_size = 0;
297 smu_dpm->dpm_current_power_state = NULL;
298 smu_dpm->dpm_request_power_state = NULL;
303 static int smu_v11_0_init_smc_tables(struct smu_context *smu)
305 struct smu_table_context *smu_table = &smu->smu_table;
306 struct smu_table *tables = NULL;
309 if (smu_table->tables || smu_table->table_count != 0)
312 tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
316 smu_table->tables = tables;
317 smu_table->table_count = TABLE_COUNT;
319 SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
320 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
321 SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
322 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
323 SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
324 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
325 SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
326 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
327 SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
328 AMDGPU_GEM_DOMAIN_VRAM);
329 SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
330 sizeof(DpmActivityMonitorCoeffInt_t),
332 AMDGPU_GEM_DOMAIN_VRAM);
334 ret = smu_v11_0_init_dpm_context(smu);
341 static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
343 struct smu_table_context *smu_table = &smu->smu_table;
346 if (!smu_table->tables || smu_table->table_count == 0)
349 kfree(smu_table->tables);
350 smu_table->tables = NULL;
351 smu_table->table_count = 0;
353 ret = smu_v11_0_fini_dpm_context(smu);
359 static int smu_v11_0_init_power(struct smu_context *smu)
361 struct smu_power_context *smu_power = &smu->smu_power;
363 if (!smu->pm_enabled)
365 if (smu_power->power_context || smu_power->power_context_size != 0)
368 smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
370 if (!smu_power->power_context)
372 smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
374 smu->metrics_time = 0;
375 smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
376 if (!smu->metrics_table) {
377 kfree(smu_power->power_context);
384 static int smu_v11_0_fini_power(struct smu_context *smu)
386 struct smu_power_context *smu_power = &smu->smu_power;
388 if (!smu->pm_enabled)
390 if (!smu_power->power_context || smu_power->power_context_size == 0)
393 kfree(smu->metrics_table);
394 kfree(smu_power->power_context);
395 smu->metrics_table = NULL;
396 smu_power->power_context = NULL;
397 smu_power->power_context_size = 0;
402 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
407 struct atom_common_table_header *header;
408 struct atom_firmware_info_v3_3 *v_3_3;
409 struct atom_firmware_info_v3_1 *v_3_1;
411 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
414 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
415 (uint8_t **)&header);
419 if (header->format_revision != 3) {
420 pr_err("unknown atom_firmware_info version! for smu11\n");
424 switch (header->content_revision) {
428 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
429 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
430 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
431 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
432 smu->smu_table.boot_values.socclk = 0;
433 smu->smu_table.boot_values.dcefclk = 0;
434 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
435 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
436 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
437 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
438 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
439 smu->smu_table.boot_values.pp_table_id = 0;
443 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
444 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
445 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
446 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
447 smu->smu_table.boot_values.socclk = 0;
448 smu->smu_table.boot_values.dcefclk = 0;
449 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
450 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
451 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
452 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
453 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
454 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
460 static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
463 struct amdgpu_device *adev = smu->adev;
464 struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
465 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
467 input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
468 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
469 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
472 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
477 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
478 smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
480 memset(&input, 0, sizeof(input));
481 input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
482 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
483 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
486 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
491 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
492 smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
494 memset(&input, 0, sizeof(input));
495 input.clk_id = SMU11_SYSPLL0_ECLK_ID;
496 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
497 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
500 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
505 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
506 smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
508 memset(&input, 0, sizeof(input));
509 input.clk_id = SMU11_SYSPLL0_VCLK_ID;
510 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
511 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
514 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
519 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
520 smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
522 memset(&input, 0, sizeof(input));
523 input.clk_id = SMU11_SYSPLL0_DCLK_ID;
524 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
525 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
528 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
533 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
534 smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
539 static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
541 struct smu_table_context *smu_table = &smu->smu_table;
542 struct smu_table *memory_pool = &smu_table->memory_pool;
545 uint32_t address_low, address_high;
547 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
550 address = (uintptr_t)memory_pool->cpu_addr;
551 address_high = (uint32_t)upper_32_bits(address);
552 address_low = (uint32_t)lower_32_bits(address);
554 ret = smu_send_smc_msg_with_param(smu,
555 SMU_MSG_SetSystemVirtualDramAddrHigh,
559 ret = smu_send_smc_msg_with_param(smu,
560 SMU_MSG_SetSystemVirtualDramAddrLow,
565 address = memory_pool->mc_address;
566 address_high = (uint32_t)upper_32_bits(address);
567 address_low = (uint32_t)lower_32_bits(address);
569 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
573 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
577 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
578 (uint32_t)memory_pool->size);
585 static int smu_v11_0_check_pptable(struct smu_context *smu)
589 ret = smu_check_powerplay_table(smu);
593 static int smu_v11_0_parse_pptable(struct smu_context *smu)
597 struct smu_table_context *table_context = &smu->smu_table;
599 if (table_context->driver_pptable)
602 table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
604 if (!table_context->driver_pptable)
607 ret = smu_store_powerplay_table(smu);
611 ret = smu_append_powerplay_table(smu);
616 static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
620 ret = smu_set_default_dpm_table(smu);
625 static int smu_v11_0_write_pptable(struct smu_context *smu)
627 struct smu_table_context *table_context = &smu->smu_table;
630 ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
635 static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
637 return smu_update_table(smu, TABLE_WATERMARKS,
638 smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
641 static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
645 ret = smu_send_smc_msg_with_param(smu,
646 SMU_MSG_SetMinDeepSleepDcefclk, clk);
648 pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
653 static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
655 struct smu_table_context *table_context = &smu->smu_table;
657 if (!smu->pm_enabled)
662 return smu_set_deep_sleep_dcefclk(smu,
663 table_context->boot_values.dcefclk / 100);
666 static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
669 struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
671 if (tool_table->mc_address) {
672 ret = smu_send_smc_msg_with_param(smu,
673 SMU_MSG_SetToolsDramAddrHigh,
674 upper_32_bits(tool_table->mc_address));
676 ret = smu_send_smc_msg_with_param(smu,
677 SMU_MSG_SetToolsDramAddrLow,
678 lower_32_bits(tool_table->mc_address));
684 static int smu_v11_0_init_display(struct smu_context *smu)
688 if (!smu->pm_enabled)
690 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
694 static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
696 uint32_t feature_low = 0, feature_high = 0;
699 if (!smu->pm_enabled)
701 if (feature_id >= 0 && feature_id < 31)
702 feature_low = (1 << feature_id);
703 else if (feature_id > 31 && feature_id < 63)
704 feature_high = (1 << feature_id);
709 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
713 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
719 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
723 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
733 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
735 struct smu_feature *feature = &smu->smu_feature;
737 uint32_t feature_mask[2];
739 mutex_lock(&feature->mutex);
740 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
743 bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
745 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
750 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
756 mutex_unlock(&feature->mutex);
760 static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
761 uint32_t *feature_mask, uint32_t num)
763 uint32_t feature_mask_high = 0, feature_mask_low = 0;
766 if (!feature_mask || num < 2)
769 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
772 ret = smu_read_smc_arg(smu, &feature_mask_high);
776 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
779 ret = smu_read_smc_arg(smu, &feature_mask_low);
783 feature_mask[0] = feature_mask_low;
784 feature_mask[1] = feature_mask_high;
789 static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
792 uint32_t feature_mask[2];
793 unsigned long feature_enabled;
794 ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
795 feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
796 ((uint64_t)feature_mask[1] << 32));
797 return !!(feature_enabled & SMC_DPM_FEATURE);
800 static int smu_v11_0_system_features_control(struct smu_context *smu,
803 struct smu_feature *feature = &smu->smu_feature;
804 uint32_t feature_mask[2];
807 if (smu->pm_enabled) {
808 ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
809 SMU_MSG_DisableAllSmuFeatures));
814 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
818 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
819 feature->feature_num);
820 bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
821 feature->feature_num);
826 static int smu_v11_0_notify_display_change(struct smu_context *smu)
830 if (!smu->pm_enabled)
832 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
833 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
839 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
840 PPCLK_e clock_select)
844 if (!smu->pm_enabled)
846 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
849 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
853 ret = smu_read_smc_arg(smu, clock);
860 /* if DC limit is zero, return AC limit */
861 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
864 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
868 ret = smu_read_smc_arg(smu, clock);
873 static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
875 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
878 max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
880 smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
882 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
883 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
884 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
885 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
886 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
887 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
889 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
890 ret = smu_v11_0_get_max_sustainable_clock(smu,
891 &(max_sustainable_clocks->uclock),
894 pr_err("[%s] failed to get max UCLK from SMC!",
900 if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
901 ret = smu_v11_0_get_max_sustainable_clock(smu,
902 &(max_sustainable_clocks->soc_clock),
905 pr_err("[%s] failed to get max SOCCLK from SMC!",
911 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
912 ret = smu_v11_0_get_max_sustainable_clock(smu,
913 &(max_sustainable_clocks->dcef_clock),
916 pr_err("[%s] failed to get max DCEFCLK from SMC!",
921 ret = smu_v11_0_get_max_sustainable_clock(smu,
922 &(max_sustainable_clocks->display_clock),
925 pr_err("[%s] failed to get max DISPCLK from SMC!",
929 ret = smu_v11_0_get_max_sustainable_clock(smu,
930 &(max_sustainable_clocks->phy_clock),
933 pr_err("[%s] failed to get max PHYCLK from SMC!",
937 ret = smu_v11_0_get_max_sustainable_clock(smu,
938 &(max_sustainable_clocks->pixel_clock),
941 pr_err("[%s] failed to get max PIXCLK from SMC!",
947 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
948 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
953 static int smu_v11_0_get_power_limit(struct smu_context *smu,
960 mutex_lock(&smu->mutex);
961 *limit = smu->default_power_limit;
962 if (smu->od_enabled) {
963 *limit *= (100 + smu->smu_table.TDPODLimit);
966 mutex_unlock(&smu->mutex);
968 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
969 POWER_SOURCE_AC << 16);
971 pr_err("[%s] get PPT limit failed!", __func__);
974 smu_read_smc_arg(smu, limit);
975 smu->power_limit = *limit;
981 static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
983 uint32_t max_power_limit;
987 n = smu->default_power_limit;
989 max_power_limit = smu->default_power_limit;
991 if (smu->od_enabled) {
992 max_power_limit *= (100 + smu->smu_table.TDPODLimit);
993 max_power_limit /= 100;
996 if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
997 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
999 pr_err("[%s] Set power limit Failed!", __func__);
1006 static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
1011 if (clk_id >= PPCLK_COUNT || !value)
1014 ret = smu_send_smc_msg_with_param(smu,
1015 SMU_MSG_GetDpmClockFreq, (clk_id << 16));
1019 ret = smu_read_smc_arg(smu, &freq);
1029 static int smu_v11_0_get_thermal_range(struct smu_context *smu,
1030 struct PP_TemperatureRange *range)
1032 PPTable_t *pptable = smu->smu_table.driver_pptable;
1033 memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
1035 range->max = pptable->TedgeLimit *
1036 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1037 range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
1038 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1039 range->hotspot_crit_max = pptable->ThotspotLimit *
1040 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1041 range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
1042 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1043 range->mem_crit_max = pptable->ThbmLimit *
1044 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1045 range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
1046 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1051 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1052 struct PP_TemperatureRange *range)
1054 struct amdgpu_device *adev = smu->adev;
1055 int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
1056 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1057 int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
1058 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1061 if (low < range->min)
1063 if (high > range->max)
1069 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1070 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1071 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1072 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1073 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1074 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1076 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1081 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1083 struct amdgpu_device *adev = smu->adev;
1086 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1087 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1088 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1090 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1095 static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
1098 struct smu_table_context *table_context = &smu->smu_table;
1099 PPTable_t *pptable = table_context->driver_pptable;
1101 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
1102 (uint32_t)pptable->FanTargetTemperature);
1107 static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1110 struct PP_TemperatureRange range = {
1120 struct amdgpu_device *adev = smu->adev;
1122 if (!smu->pm_enabled)
1124 smu_v11_0_get_thermal_range(smu, &range);
1126 if (smu->smu_table.thermal_controller_type) {
1127 ret = smu_v11_0_set_thermal_range(smu, &range);
1131 ret = smu_v11_0_enable_thermal_alert(smu);
1134 ret = smu_v11_0_set_thermal_fan_table(smu);
1139 adev->pm.dpm.thermal.min_temp = range.min;
1140 adev->pm.dpm.thermal.max_temp = range.max;
1141 adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
1142 adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
1143 adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
1144 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
1145 adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
1146 adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
1147 adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
1152 static int smu_v11_0_get_metrics_table(struct smu_context *smu,
1153 SmuMetrics_t *metrics_table)
1157 if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) {
1158 ret = smu_update_table(smu, TABLE_SMU_METRICS,
1159 (void *)metrics_table, false);
1161 pr_info("Failed to export SMU metrics table!\n");
1164 memcpy(smu->metrics_table, metrics_table, sizeof(SmuMetrics_t));
1165 smu->metrics_time = jiffies;
1167 memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t));
1172 static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
1173 enum amd_pp_sensors sensor,
1177 SmuMetrics_t metrics;
1182 ret = smu_v11_0_get_metrics_table(smu, &metrics);
1187 case AMDGPU_PP_SENSOR_GPU_LOAD:
1188 *value = metrics.AverageGfxActivity;
1190 case AMDGPU_PP_SENSOR_MEM_LOAD:
1191 *value = metrics.AverageUclkActivity;
1194 pr_err("Invalid sensor for retrieving clock activity\n");
1201 static int smu_v11_0_thermal_get_temperature(struct smu_context *smu,
1202 enum amd_pp_sensors sensor,
1205 struct amdgpu_device *adev = smu->adev;
1206 SmuMetrics_t metrics;
1213 ret = smu_v11_0_get_metrics_table(smu, &metrics);
1218 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1219 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
1220 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
1221 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
1223 temp = temp & 0x1ff;
1224 temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
1228 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1229 *value = metrics.TemperatureEdge *
1230 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1232 case AMDGPU_PP_SENSOR_MEM_TEMP:
1233 *value = metrics.TemperatureHBM *
1234 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1237 pr_err("Invalid sensor for retrieving temp\n");
1244 static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
1247 SmuMetrics_t metrics;
1252 ret = smu_v11_0_get_metrics_table(smu, &metrics);
1256 *value = metrics.CurrSocketPower << 8;
1261 static uint16_t convert_to_vddc(uint8_t vid)
1263 return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1266 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1268 struct amdgpu_device *adev = smu->adev;
1269 uint32_t vdd = 0, val_vid = 0;
1273 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1274 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1275 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1277 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1285 static int smu_v11_0_read_sensor(struct smu_context *smu,
1286 enum amd_pp_sensors sensor,
1287 void *data, uint32_t *size)
1289 struct smu_table_context *table_context = &smu->smu_table;
1290 PPTable_t *pptable = table_context->driver_pptable;
1293 case AMDGPU_PP_SENSOR_GPU_LOAD:
1294 case AMDGPU_PP_SENSOR_MEM_LOAD:
1295 ret = smu_v11_0_get_current_activity_percent(smu,
1300 case AMDGPU_PP_SENSOR_GFX_MCLK:
1301 ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
1304 case AMDGPU_PP_SENSOR_GFX_SCLK:
1305 ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
1308 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1309 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1310 case AMDGPU_PP_SENSOR_MEM_TEMP:
1311 ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data);
1314 case AMDGPU_PP_SENSOR_GPU_POWER:
1315 ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
1318 case AMDGPU_PP_SENSOR_VDDGFX:
1319 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1322 case AMDGPU_PP_SENSOR_UVD_POWER:
1323 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
1326 case AMDGPU_PP_SENSOR_VCE_POWER:
1327 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
1330 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1331 *(uint32_t *)data = 0;
1334 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1335 *(uint32_t *)data = pptable->FanMaximumRpm;
1339 ret = smu_common_read_sensor(smu, sensor, data, size);
/*
 * smu_v11_0_display_clock_voltage_request - honor a display-driver request
 * for a hard minimum frequency on one of the display-related clocks.
 *
 * Maps the amd_pp clock type onto the SMC PPCLK_* selector and sends
 * SMU_MSG_SetHardMinByFreq with the selector in the high 16 bits and the
 * frequency (MHz) in the low 16 bits.
 */
1350 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1351 struct pp_display_clock_request
1354 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1356 PPCLK_e clk_select = 0;
/* Request arrives in kHz; the SMC message takes MHz. */
1357 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
/* Nothing to do while power management is disabled. */
1359 if (!smu->pm_enabled)
/* Only act while DCEFCLK DPM is enabled on the SMC. */
1361 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
/* Translate the generic clock type to the SMC clock selector. */
1363 case amd_pp_dcef_clock:
1364 clk_select = PPCLK_DCEFCLK;
1366 case amd_pp_disp_clock:
1367 clk_select = PPCLK_DISPCLK;
1369 case amd_pp_pixel_clock:
1370 clk_select = PPCLK_PIXCLK;
1372 case amd_pp_phy_clock:
1373 clk_select = PPCLK_PHYCLK;
/* Unknown clock type: log it; the request is not forwarded as-is. */
1376 pr_info("[%s] Invalid Clock Type!", __func__);
/* High word: clock selector; low word: hard-min frequency in MHz. */
1384 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1385 (clk_select << 16) | clk_freq);
/*
 * smu_v11_0_set_watermarks_table - fill the SMC Watermarks_t table from the
 * display manager's per-clock-range watermark sets.
 *
 * WatermarkRow[1] holds the DMIF sets and WatermarkRow[0] the MCIF sets;
 * each row supports at most 4 entries.  Clock values arrive in kHz and are
 * stored as little-endian 16-bit values (the kHz divisor sits on the
 * continuation lines of each expression).
 */
1392 static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
1393 Watermarks_t *table, struct
1394 dm_pp_wm_sets_with_clock_ranges_soc15
/* Reject NULL table/ranges before touching either. */
1399 if (!table || !clock_ranges)
/* Hardware table has room for only 4 DMIF and 4 MCIF sets. */
1402 if (clock_ranges->num_wm_dmif_sets > 4 ||
1403 clock_ranges->num_wm_mcif_sets > 4)
/* DMIF sets -> row 1: DCFCLK min/max plus memory-clock min/max. */
1406 for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1407 table->WatermarkRow[1][i].MinClock =
1408 cpu_to_le16((uint16_t)
1409 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1411 table->WatermarkRow[1][i].MaxClock =
1412 cpu_to_le16((uint16_t)
1413 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1415 table->WatermarkRow[1][i].MinUclk =
1416 cpu_to_le16((uint16_t)
1417 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1419 table->WatermarkRow[1][i].MaxUclk =
1420 cpu_to_le16((uint16_t)
1421 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1423 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1424 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
/* MCIF sets -> row 0: SOCCLK min/max plus memory-clock min/max. */
1427 for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1428 table->WatermarkRow[0][i].MinClock =
1429 cpu_to_le16((uint16_t)
1430 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1432 table->WatermarkRow[0][i].MaxClock =
1433 cpu_to_le16((uint16_t)
1434 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1436 table->WatermarkRow[0][i].MinUclk =
1437 cpu_to_le16((uint16_t)
1438 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1440 table->WatermarkRow[0][i].MaxUclk =
1441 cpu_to_le16((uint16_t)
1442 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1444 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1445 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
/*
 * smu_v11_0_set_watermarks_for_clock_ranges - cache the watermark sets in
 * the CPU-side TABLE_WATERMARKS copy and flag them for upload.
 *
 * Only fills the table when both DCEFCLK and SOCCLK DPM are enabled and
 * watermarks are not disabled; sets WATERMARKS_EXIST and clears
 * WATERMARKS_LOADED so a later writer pushes the table to the SMC.
 */
1452 smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1453 dm_pp_wm_sets_with_clock_ranges_soc15
1457 struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
1458 Watermarks_t *table = watermarks->cpu_addr;
1460 if (!smu->disable_watermark &&
1461 smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
1462 smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
1463 smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
/* Mark the table as populated but not yet uploaded to the SMC. */
1464 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1465 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
/*
 * smu_v11_0_get_clock_ranges - query the SMC for the max or min DPM
 * frequency of one clock domain.
 *
 * The clock selector rides in the high 16 bits of the message parameter;
 * the frequency is read back through smu_read_smc_arg().  A "max" flag
 * (parameter on an elided line) chooses between GetMaxDpmFreq and
 * GetMinDpmFreq.
 */
1471 static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
1473 PPCLK_e clock_select,
1479 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
1480 (clock_select << 16));
1482 pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
1485 smu_read_smc_arg(smu, clock);
1487 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
1488 (clock_select << 16));
1490 pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
1493 smu_read_smc_arg(smu, clock);
/*
 * smu_v11_0_dpm_get_sclk - return the min (low == true) or max gfx clock
 * from GFXCLK DPM.  The SMC reports MHz; the result is scaled by 100
 * (i.e. returned in 10 kHz units).  Fails when GFXCLK DPM is disabled.
 */
1499 static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
1504 if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
1505 pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
/* low: query the minimum DPM frequency (last arg false = min). */
1510 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
1512 pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
/* otherwise: query the maximum (last arg true = max). */
1516 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
1518 pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
/* MHz -> 10 kHz units expected by callers. */
1523 return (gfx_clk * 100);
1526 static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
1531 if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
1532 pr_err("[GetMclks]: memclk dpm not enabled!\n");
1537 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
1539 pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
1543 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_GFXCLK, true);
1545 pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
1550 return (mem_clk * 100);
/*
 * smu_v11_0_set_od8_default_settings - allocate the driver-side copy of
 * the overdrive (OD8) table and initialize it with defaults.
 *
 * Exports the current table from the SMC, lets the ASIC backend fill in
 * its default OD8 settings, then imports the table back to the SMC.
 * A no-op when the table was already allocated.
 */
1553 static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
1556 struct smu_table_context *table_context = &smu->smu_table;
/* Already initialized: nothing to do. */
1560 if (table_context->overdrive_table)
1563 table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
1565 if (!table_context->overdrive_table)
/* false = read (export) the table from the SMC. */
1568 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
1570 pr_err("Failed to export over drive table!\n");
/* ASIC-specific hook populates the default OD8 settings. */
1574 smu_set_default_od8_settings(smu);
/* true = write (import) the updated table back to the SMC. */
1577 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
1579 pr_err("Failed to import over drive table!\n");
1586 static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
1588 int pplib_workload = 0;
1590 switch (power_profile) {
1591 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
1592 pplib_workload = WORKLOAD_DEFAULT_BIT;
1594 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1595 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1597 case PP_SMC_POWER_PROFILE_POWERSAVING:
1598 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
1600 case PP_SMC_POWER_PROFILE_VIDEO:
1601 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1603 case PP_SMC_POWER_PROFILE_VR:
1604 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1606 case PP_SMC_POWER_PROFILE_COMPUTE:
1607 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1609 case PP_SMC_POWER_PROFILE_CUSTOM:
1610 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1614 return pplib_workload;
/*
 * smu_v11_0_get_power_profile_mode - format the power-profile table for
 * the pp_power_profile_mode sysfs file.
 *
 * For every profile up to PP_SMC_POWER_PROFILE_CUSTOM, fetches the
 * activity-monitor coefficient table from the SMC and prints one header
 * row plus one row each for the Gfx, Soc, Mem and Fclk domains.  The
 * currently active profile is marked with '*'.
 *
 * NOTE(review): output is built with unbounded sprintf() into 'buf' —
 * presumably a PAGE_SIZE sysfs buffer; confirm callers guarantee space.
 * NOTE(review): workload_type is uint16_t while the converter returns
 * int, so a negative return could not be detected here.
 */
1617 static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
1619 DpmActivityMonitorCoeffInt_t activity_monitor;
1620 uint32_t i, size = 0;
1621 uint16_t workload_type = 0;
/* Human-readable names indexed by PP_SMC_POWER_PROFILE_*. */
1622 static const char *profile_name[] = {
/* Column headers for the table printed below. */
1630 static const char *title[] = {
1631 "PROFILE_INDEX(NAME)",
1635 "MinActiveFreqType",
1640 "PD_Data_error_coeff",
1641 "PD_Data_error_rate_coeff"};
1644 if (!smu->pm_enabled || !buf)
1647 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1648 title[0], title[1], title[2], title[3], title[4], title[5],
1649 title[6], title[7], title[8], title[9], title[10]);
1651 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1652 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1653 workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
/* false = read the coefficient table for this workload from the SMC. */
1654 result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1655 workload_type, &activity_monitor, false);
1657 pr_err("[%s] Failed to get activity monitor!", __func__);
/* Profile row: index, name, '*' marks the active profile. */
1661 size += sprintf(buf + size, "%2d %14s%s:\n",
1662 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
/* One row per clock domain: Gfx, Soc, Mem, Fclk. */
1664 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1668 activity_monitor.Gfx_FPS,
1669 activity_monitor.Gfx_UseRlcBusy,
1670 activity_monitor.Gfx_MinActiveFreqType,
1671 activity_monitor.Gfx_MinActiveFreq,
1672 activity_monitor.Gfx_BoosterFreqType,
1673 activity_monitor.Gfx_BoosterFreq,
1674 activity_monitor.Gfx_PD_Data_limit_c,
1675 activity_monitor.Gfx_PD_Data_error_coeff,
1676 activity_monitor.Gfx_PD_Data_error_rate_coeff);
1678 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1682 activity_monitor.Soc_FPS,
1683 activity_monitor.Soc_UseRlcBusy,
1684 activity_monitor.Soc_MinActiveFreqType,
1685 activity_monitor.Soc_MinActiveFreq,
1686 activity_monitor.Soc_BoosterFreqType,
1687 activity_monitor.Soc_BoosterFreq,
1688 activity_monitor.Soc_PD_Data_limit_c,
1689 activity_monitor.Soc_PD_Data_error_coeff,
1690 activity_monitor.Soc_PD_Data_error_rate_coeff);
1692 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1696 activity_monitor.Mem_FPS,
1697 activity_monitor.Mem_UseRlcBusy,
1698 activity_monitor.Mem_MinActiveFreqType,
1699 activity_monitor.Mem_MinActiveFreq,
1700 activity_monitor.Mem_BoosterFreqType,
1701 activity_monitor.Mem_BoosterFreq,
1702 activity_monitor.Mem_PD_Data_limit_c,
1703 activity_monitor.Mem_PD_Data_error_coeff,
1704 activity_monitor.Mem_PD_Data_error_rate_coeff);
1706 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1710 activity_monitor.Fclk_FPS,
1711 activity_monitor.Fclk_UseRlcBusy,
1712 activity_monitor.Fclk_MinActiveFreqType,
1713 activity_monitor.Fclk_MinActiveFreq,
1714 activity_monitor.Fclk_BoosterFreqType,
1715 activity_monitor.Fclk_BoosterFreq,
1716 activity_monitor.Fclk_PD_Data_limit_c,
1717 activity_monitor.Fclk_PD_Data_error_coeff,
1718 activity_monitor.Fclk_PD_Data_error_rate_coeff);
1724 static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1726 DpmActivityMonitorCoeffInt_t activity_monitor;
1727 int workload_type = 0, ret = 0;
1729 smu->power_profile_mode = input[size];
1731 if (!smu->pm_enabled)
1733 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1734 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1738 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1739 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1740 WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
1742 pr_err("[%s] Failed to get activity monitor!", __func__);
1747 case 0: /* Gfxclk */
1748 activity_monitor.Gfx_FPS = input[1];
1749 activity_monitor.Gfx_UseRlcBusy = input[2];
1750 activity_monitor.Gfx_MinActiveFreqType = input[3];
1751 activity_monitor.Gfx_MinActiveFreq = input[4];
1752 activity_monitor.Gfx_BoosterFreqType = input[5];
1753 activity_monitor.Gfx_BoosterFreq = input[6];
1754 activity_monitor.Gfx_PD_Data_limit_c = input[7];
1755 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1756 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1758 case 1: /* Socclk */
1759 activity_monitor.Soc_FPS = input[1];
1760 activity_monitor.Soc_UseRlcBusy = input[2];
1761 activity_monitor.Soc_MinActiveFreqType = input[3];
1762 activity_monitor.Soc_MinActiveFreq = input[4];
1763 activity_monitor.Soc_BoosterFreqType = input[5];
1764 activity_monitor.Soc_BoosterFreq = input[6];
1765 activity_monitor.Soc_PD_Data_limit_c = input[7];
1766 activity_monitor.Soc_PD_Data_error_coeff = input[8];
1767 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1770 activity_monitor.Mem_FPS = input[1];
1771 activity_monitor.Mem_UseRlcBusy = input[2];
1772 activity_monitor.Mem_MinActiveFreqType = input[3];
1773 activity_monitor.Mem_MinActiveFreq = input[4];
1774 activity_monitor.Mem_BoosterFreqType = input[5];
1775 activity_monitor.Mem_BoosterFreq = input[6];
1776 activity_monitor.Mem_PD_Data_limit_c = input[7];
1777 activity_monitor.Mem_PD_Data_error_coeff = input[8];
1778 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1781 activity_monitor.Fclk_FPS = input[1];
1782 activity_monitor.Fclk_UseRlcBusy = input[2];
1783 activity_monitor.Fclk_MinActiveFreqType = input[3];
1784 activity_monitor.Fclk_MinActiveFreq = input[4];
1785 activity_monitor.Fclk_BoosterFreqType = input[5];
1786 activity_monitor.Fclk_BoosterFreq = input[6];
1787 activity_monitor.Fclk_PD_Data_limit_c = input[7];
1788 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
1789 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
1793 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1794 WORKLOAD_PPLIB_COMPUTE_BIT, &activity_monitor, true);
1796 pr_err("[%s] Failed to set activity monitor!", __func__);
1801 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1803 smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
1804 smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1805 1 << workload_type);
/*
 * smu_v11_0_update_od8_settings - change one overdrive (OD8) setting.
 *
 * Round-trips the overdrive table: export from the SMC, update the
 * specified value through the ASIC backend, import back to the SMC.
 */
1810 static int smu_v11_0_update_od8_settings(struct smu_context *smu,
1814 struct smu_table_context *table_context = &smu->smu_table;
/* false = read (export) the current table from the SMC. */
1817 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1818 table_context->overdrive_table, false);
1820 pr_err("Failed to export over drive table!\n");
/* ASIC-specific hook applies the new value at 'index'. */
1824 smu_update_specified_od8_value(smu, index, value);
/* true = write (import) the modified table back to the SMC. */
1826 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1827 table_context->overdrive_table, true);
1829 pr_err("Failed to import over drive table!\n");
/*
 * smu_v11_0_dpm_set_uvd_enable - enable/disable UVD DPM.
 * No-op when the feature is unsupported or already in the requested state.
 */
1836 static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
1838 if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
/* Skip the SMC round-trip when the state already matches. */
1841 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
1844 return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
/*
 * smu_v11_0_dpm_set_vce_enable - enable/disable VCE DPM.
 * No-op when the feature is unsupported or already in the requested state.
 */
1847 static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
1849 if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
/* Skip the SMC round-trip when the state already matches. */
1852 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
1855 return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
/*
 * smu_v11_0_get_current_rpm - read the current fan speed (RPM) from the
 * SMC via SMU_MSG_GetCurrentRpm; the value comes back in the message
 * argument register read by smu_read_smc_arg().
 */
1858 static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1859 uint32_t *current_rpm)
1863 ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1866 pr_err("Attempt to get current RPM from SMC Failed!\n");
1870 smu_read_smc_arg(smu, current_rpm);
/*
 * smu_v11_0_get_fan_control_mode - report AUTO when the SMC fan-control
 * feature is enabled, otherwise MANUAL.
 */
1876 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1878 if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
1879 return AMD_FAN_CTRL_MANUAL;
1881 return AMD_FAN_CTRL_AUTO;
1885 smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
1889 uint32_t percent = 0;
1890 uint32_t current_rpm;
1891 PPTable_t *pptable = smu->smu_table.driver_pptable;
1893 ret = smu_v11_0_get_current_rpm(smu, ¤t_rpm);
1894 percent = current_rpm * 100 / pptable->FanMaximumRpm;
1895 *speed = percent > 100 ? 100 : percent;
/*
 * smu_v11_0_smc_fan_control - start (true) or stop (false) the SMC's
 * automatic fan control by toggling FEATURE_FAN_CONTROL.
 *
 * NOTE(review): the guard below tests is_supported WITHOUT negation
 * before toggling the feature; the statement it guards is on an elided
 * line, but if it is an early return this polarity looks inverted
 * (one would expect "if (!supported) return").  Verify against callers.
 */
1901 smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1905 if (smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
1908 ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
1910 pr_err("[%s]%s smc FAN CONTROL feature failed!",
1911 __func__, (start ? "Start" : "Stop"));
/*
 * smu_v11_0_set_fan_static_mode - put the fan controller into a static
 * PWM mode: clear the TMIN field and program FDO_PWM_MODE in
 * CG_FDO_CTRL2 via read-modify-write.
 */
1917 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1919 struct amdgpu_device *adev = smu->adev;
/* Zero TMIN so the static duty takes effect immediately. */
1921 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1922 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1923 CG_FDO_CTRL2, TMIN, 0));
/* Select the requested FDO PWM mode (e.g. static duty / static RPM). */
1924 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1925 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1926 CG_FDO_CTRL2, FDO_PWM_MODE, mode));
/*
 * smu_v11_0_set_fan_speed_percent - force the fan to a fixed duty cycle.
 *
 * @speed: target speed as a percentage (0-100) of full duty.
 * Disables SMC fan control, scales the percentage by the hardware's
 * FMAX_DUTY100 value, programs the static duty, then switches the fan
 * controller into static-duty PWM mode.
 */
1932 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1934 struct amdgpu_device *adev = smu->adev;
/* Take the fan away from the SMC before driving it manually. */
1943 if (smu_v11_0_smc_fan_control(smu, stop))
/* duty100 = register value representing 100% duty. */
1945 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1946 CG_FDO_CTRL1, FMAX_DUTY100);
/* 64-bit intermediate avoids overflow; presumably divided by 100 on an
 * elided line before the narrowing cast below. */
1950 tmp64 = (uint64_t)speed * duty100;
1952 duty = (uint32_t)tmp64;
1954 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1955 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1956 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1958 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
/*
 * smu_v11_0_set_fan_control_mode - dispatch on the requested fan mode:
 * NONE -> full speed, MANUAL -> stop SMC control (caller sets speed),
 * AUTO -> hand control back to the SMC.
 */
1962 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1970 case AMD_FAN_CTRL_NONE:
/* No control: pin the fan at 100%. */
1971 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1973 case AMD_FAN_CTRL_MANUAL:
1974 ret = smu_v11_0_smc_fan_control(smu, stop);
1976 case AMD_FAN_CTRL_AUTO:
1977 ret = smu_v11_0_smc_fan_control(smu, start);
1984 pr_err("[%s]Set fan control mode failed!", __func__);
/*
 * smu_v11_0_set_fan_speed_rpm - force the fan to a fixed RPM.
 *
 * Stops SMC fan control, derives the tachometer target period from the
 * crystal clock (xclk) and the requested RPM, then switches the fan
 * controller into static-RPM mode.  Serialized with smu->mutex.
 *
 * NOTE(review): tach_period divides by (8 * speed) — unless an elided
 * line rejects speed == 0 this is a division by zero; verify.
 */
1991 static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1994 struct amdgpu_device *adev = smu->adev;
1996 uint32_t tach_period, crystal_clock_freq;
2002 mutex_lock(&(smu->mutex));
/* Take the fan away from the SMC before driving it manually. */
2003 ret = smu_v11_0_smc_fan_control(smu, stop);
2005 goto set_fan_speed_rpm_failed;
2007 crystal_clock_freq = amdgpu_asic_get_xclk(adev);
/* Convert RPM to tach pulse period: xclk is in 10 kHz units, the fan
 * tach produces 8 pulses per revolution. */
2008 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
2009 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
2010 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
2011 CG_TACH_CTRL, TARGET_PERIOD,
2014 ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
2016 set_fan_speed_rpm_failed:
2017 mutex_unlock(&(smu->mutex));
/*
 * smu_v11_0_set_xgmi_pstate - set the XGMI link power state: non-zero
 * pstate selects D0 (active), zero selects D3 (low power).  The SMC
 * message is serialized with smu->mutex.
 */
2021 static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
2025 mutex_lock(&(smu->mutex));
2026 ret = smu_send_smc_msg_with_param(smu,
2027 SMU_MSG_SetXgmiMode,
2028 pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
2029 mutex_unlock(&(smu->mutex));
/*
 * SMU v11 common function table: generic SMU-11 implementations shared by
 * the ASIC-specific backends (e.g. Vega20).  Installed on the smu_context
 * by smu_v11_0_set_smu_funcs().
 */
2033 static const struct smu_funcs smu_v11_0_funcs = {
2034 .init_microcode = smu_v11_0_init_microcode,
2035 .load_microcode = smu_v11_0_load_microcode,
2036 .check_fw_status = smu_v11_0_check_fw_status,
2037 .check_fw_version = smu_v11_0_check_fw_version,
2038 .send_smc_msg = smu_v11_0_send_msg,
2039 .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
2040 .read_smc_arg = smu_v11_0_read_arg,
2041 .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
2042 .init_smc_tables = smu_v11_0_init_smc_tables,
2043 .fini_smc_tables = smu_v11_0_fini_smc_tables,
2044 .init_power = smu_v11_0_init_power,
2045 .fini_power = smu_v11_0_fini_power,
2046 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2047 .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
2048 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2049 .check_pptable = smu_v11_0_check_pptable,
2050 .parse_pptable = smu_v11_0_parse_pptable,
2051 .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
2052 .write_pptable = smu_v11_0_write_pptable,
2053 .write_watermarks_table = smu_v11_0_write_watermarks_table,
2054 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
2055 .set_tool_table_location = smu_v11_0_set_tool_table_location,
2056 .init_display = smu_v11_0_init_display,
2057 .set_allowed_mask = smu_v11_0_set_allowed_mask,
2058 .get_enabled_mask = smu_v11_0_get_enabled_mask,
2059 .is_dpm_running = smu_v11_0_is_dpm_running,
2060 .system_features_control = smu_v11_0_system_features_control,
2061 .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
2062 .notify_display_change = smu_v11_0_notify_display_change,
2063 .get_power_limit = smu_v11_0_get_power_limit,
2064 .set_power_limit = smu_v11_0_set_power_limit,
2065 .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
2066 .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
2067 .start_thermal_control = smu_v11_0_start_thermal_control,
2068 .read_sensor = smu_v11_0_read_sensor,
2069 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
2070 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2071 .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
2072 .get_sclk = smu_v11_0_dpm_get_sclk,
2073 .get_mclk = smu_v11_0_dpm_get_mclk,
2074 .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
2075 .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
2076 .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
2077 .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
2078 .update_od8_settings = smu_v11_0_update_od8_settings,
2079 .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
2080 .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
2081 .get_current_rpm = smu_v11_0_get_current_rpm,
2082 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2083 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2084 .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
2085 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
2086 .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
2087 .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
2090 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
2092 struct amdgpu_device *adev = smu->adev;
2094 smu->funcs = &smu_v11_0_funcs;
2095 switch (adev->asic_type) {
2097 vega20_set_ppt_funcs(smu);
2100 pr_warn("Unknown asic for smu11\n");