2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
26 #include "amdgpu_smu.h"
27 #include "atomfirmware.h"
28 #include "amdgpu_atomfirmware.h"
29 #include "smu_v11_0.h"
30 #include "smu11_driver_if.h"
31 #include "soc15_common.h"
33 #include "vega20_ppt.h"
34 #include "pp_thermal.h"
36 #include "asic_reg/thm/thm_11_0_2_offset.h"
37 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
38 #include "asic_reg/mp/mp_9_0_offset.h"
39 #include "asic_reg/mp/mp_9_0_sh_mask.h"
40 #include "asic_reg/nbio/nbio_7_4_offset.h"
41 #include "asic_reg/smuio/smuio_9_0_offset.h"
42 #include "asic_reg/smuio/smuio_9_0_sh_mask.h"
44 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
46 #define SMU11_TOOL_SIZE 0x19000
47 #define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
48 #define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
50 #define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
51 #define SMU11_VOLTAGE_SCALE 4
53 #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
54 FEATURE_DPM_GFXCLK_MASK | \
55 FEATURE_DPM_UCLK_MASK | \
56 FEATURE_DPM_SOCCLK_MASK | \
57 FEATURE_DPM_UVD_MASK | \
58 FEATURE_DPM_VCE_MASK | \
59 FEATURE_DPM_MP0CLK_MASK | \
60 FEATURE_DPM_LINK_MASK | \
61 FEATURE_DPM_DCEFCLK_MASK)
/* Post a (pre-translated) message index to the SMU mailbox register
 * without polling for completion.  Callers pair this with
 * smu_v11_0_wait_for_response().
 * NOTE(review): this extract is missing lines (second parameter, return,
 * closing brace) — not compilable as shown.
 */
63 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
66 struct amdgpu_device *adev = smu->adev;
67 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
/* Read back the SMU's 32-bit response argument from the C2PMSG_82
 * scratch register into *arg.
 */
71 static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
73 struct amdgpu_device *adev = smu->adev;
75 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
/* Poll C2PMSG_90 until the SMU posts a non-zero response or
 * adev->usec_timeout iterations elapse.  A final register value of 0x1
 * means success; anything else is reported as -EIO.
 */
79 static int smu_v11_0_wait_for_response(struct smu_context *smu)
81 struct amdgpu_device *adev = smu->adev;
82 uint32_t cur_value, i;
84 for (i = 0; i < adev->usec_timeout; i++) {
85 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
86 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
91 /* timeout means wrong logic */
92 if (i == adev->usec_timeout)
95 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
/* Translate a generic SMU message ID to the ASIC-specific index, clear
 * the response register, fire the message and wait for the SMU to ack.
 * Logs the failing index and response value on error.
 */
98 static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
100 struct amdgpu_device *adev = smu->adev;
101 int ret = 0, index = 0;
103 index = smu_msg_get_index(smu, msg);
/* Drain any previous in-flight response before reusing the mailbox. */
107 smu_v11_0_wait_for_response(smu);
109 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
111 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
113 ret = smu_v11_0_wait_for_response(smu);
116 pr_err("Failed to send message 0x%x, response 0x%x\n", index,
/* Like smu_v11_0_send_msg() but also writes a 32-bit parameter into
 * C2PMSG_82 before firing the message.
 * NOTE(review): return type and several lines are missing from this
 * extract.
 */
124 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
128 struct amdgpu_device *adev = smu->adev;
129 int ret = 0, index = 0;
131 index = smu_msg_get_index(smu, msg);
/* Make sure the previous transaction has completed before reusing the
 * mailbox registers. */
135 ret = smu_v11_0_wait_for_response(smu);
137 pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
140 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
142 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
144 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
146 ret = smu_v11_0_wait_for_response(smu);
148 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
/* Request the per-ASIC SMC firmware image ("amdgpu/<chip>_smc.bin"),
 * validate it, record its version, and — when firmware is loaded via
 * PSP — register it in the ucode table and grow the reserved size.
 * On failure the firmware reference is released.
 */
154 static int smu_v11_0_init_microcode(struct smu_context *smu)
156 struct amdgpu_device *adev = smu->adev;
157 const char *chip_name;
160 const struct smc_firmware_header_v1_0 *hdr;
161 const struct common_firmware_header *header;
162 struct amdgpu_firmware_info *ucode = NULL;
164 switch (adev->asic_type) {
166 chip_name = "vega20";
172 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
174 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
177 err = amdgpu_ucode_validate(adev->pm.fw);
181 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
182 amdgpu_ucode_print_smc_hdr(&hdr->header);
183 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
185 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
186 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
187 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
188 ucode->fw = adev->pm.fw;
189 header = (const struct common_firmware_header *)ucode->fw->data;
190 adev->firmware.fw_size +=
191 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
196 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
198 release_firmware(adev->pm.fw);
/* Load the SMC microcode into the SMU.
 * NOTE(review): body is entirely missing from this extract — presumably
 * a stub or removed lines; verify against the full source.
 */
204 static int smu_v11_0_load_microcode(struct smu_context *smu)
/* Check that the MP1 firmware has come up by reading the firmware-flags
 * register over the PCIe aperture and testing the INTERRUPTS_ENABLED
 * bit.
 */
209 static int smu_v11_0_check_fw_status(struct smu_context *smu)
211 struct amdgpu_device *adev = smu->adev;
212 uint32_t mp1_fw_flags;
214 mp1_fw_flags = RREG32_PCIE(MP1_Public |
215 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
217 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
218 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
/* Query the SMU driver-interface version and compare it against the
 * version this driver was built for (smu->smc_if_version).
 */
224 static int smu_v11_0_check_fw_version(struct smu_context *smu)
226 uint32_t smu_version = 0xff;
229 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
233 ret = smu_read_smc_arg(smu, &smu_version);
237 if (smu_version != smu->smc_if_version)
/* Locate the powerplay data table in the VBIOS via the ATOM master
 * table and cache a pointer/size in smu_table, but only if the caller
 * has not already supplied an override table.
 */
243 static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
250 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
253 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
258 if (!smu->smu_table.power_play_table)
259 smu->smu_table.power_play_table = table;
260 if (!smu->smu_table.power_play_table_size)
261 smu->smu_table.power_play_table_size = size;
/* Allocate the DPM context via the ASIC hook, guarding against double
 * initialization.
 */
266 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
268 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
270 if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
273 return smu_alloc_dpm_context(smu);
/* Free all DPM context allocations and null the pointers so a later
 * re-init starts from a clean state.
 */
276 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
278 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
280 if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
283 kfree(smu_dpm->dpm_context);
284 kfree(smu_dpm->golden_dpm_context);
285 kfree(smu_dpm->dpm_current_power_state);
286 kfree(smu_dpm->dpm_request_power_state);
287 smu_dpm->dpm_context = NULL;
288 smu_dpm->golden_dpm_context = NULL;
289 smu_dpm->dpm_context_size = 0;
290 smu_dpm->dpm_current_power_state = NULL;
291 smu_dpm->dpm_request_power_state = NULL;
/* Allocate the driver-side descriptor array for the SMU-shared tables
 * (pptable, watermarks, metrics, overdrive, PM status log, activity
 * monitor) and initialize each entry's size/alignment/domain, then set
 * up the DPM context.
 */
296 static int smu_v11_0_init_smc_tables(struct smu_context *smu)
298 struct smu_table_context *smu_table = &smu->smu_table;
299 struct smu_table *tables = NULL;
302 if (smu_table->tables || smu_table->table_count != 0)
305 tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
309 smu_table->tables = tables;
310 smu_table->table_count = TABLE_COUNT;
312 SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
313 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
314 SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
315 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
316 SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
317 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
318 SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
319 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
320 SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
321 AMDGPU_GEM_DOMAIN_VRAM);
322 SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
323 sizeof(DpmActivityMonitorCoeffInt_t),
325 AMDGPU_GEM_DOMAIN_VRAM);
327 ret = smu_v11_0_init_dpm_context(smu);
/* Release the SMU table descriptor array and tear down the DPM
 * context.  Safe to call only after a successful init (guard below).
 */
334 static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
336 struct smu_table_context *smu_table = &smu->smu_table;
339 if (!smu_table->tables || smu_table->table_count == 0)
342 kfree(smu_table->tables);
343 smu_table->tables = NULL;
344 smu_table->table_count = 0;
346 ret = smu_v11_0_fini_dpm_context(smu);
/* Allocate the power context (a smu_11_0_dpm_context) once; -ENOMEM is
 * presumably returned on allocation failure (line missing here).
 */
352 static int smu_v11_0_init_power(struct smu_context *smu)
354 struct smu_power_context *smu_power = &smu->smu_power;
356 if (smu_power->power_context || smu_power->power_context_size != 0)
359 smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
361 if (!smu_power->power_context)
363 smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
/* Free the power context and reset the bookkeeping fields. */
368 static int smu_v11_0_fini_power(struct smu_context *smu)
370 struct smu_power_context *smu_power = &smu->smu_power;
372 if (!smu_power->power_context || smu_power->power_context_size == 0)
375 kfree(smu_power->power_context);
376 smu_power->power_context = NULL;
377 smu_power->power_context_size = 0;
/* Read the ATOM firmware-info table from the VBIOS and populate the
 * boot-up clock/voltage values in smu_table.boot_values.  Only format
 * revision 3 is accepted; content revisions are dispatched below
 * (v3_1 has no pptable id, v3_3 adds pplib_pptable_id).
 */
382 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
387 struct atom_common_table_header *header;
388 struct atom_firmware_info_v3_3 *v_3_3;
389 struct atom_firmware_info_v3_1 *v_3_1;
391 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
394 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
395 (uint8_t **)&header);
399 if (header->format_revision != 3) {
400 pr_err("unknown atom_firmware_info version! for smu11\n");
404 switch (header->content_revision) {
408 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
409 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
410 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
411 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
/* socclk/dcefclk are not present in v3_1; filled in later from the
 * SMU clock-info command table. */
412 smu->smu_table.boot_values.socclk = 0;
413 smu->smu_table.boot_values.dcefclk = 0;
414 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
415 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
416 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
417 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
418 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
419 smu->smu_table.boot_values.pp_table_id = 0;
423 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
424 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
425 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
426 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
427 smu->smu_table.boot_values.socclk = 0;
428 smu->smu_table.boot_values.dcefclk = 0;
429 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
430 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
431 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
432 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
433 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
434 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
/* Query boot-up SOCCLK, DCEFCLK, ECLK, VCLK and DCLK from the VBIOS by
 * executing the ATOM getsmuclockinfo command table once per clock id.
 * The output struct aliases the input buffer; returned frequencies are
 * in Hz and stored in 10 kHz units (/ 10000).
 */
440 static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
443 struct amdgpu_device *adev = smu->adev;
444 struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
445 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
447 input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
448 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
449 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
452 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
/* The ATOM call writes its result back into the parameter buffer. */
457 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
458 smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
460 memset(&input, 0, sizeof(input));
461 input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
462 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
463 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
466 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
471 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
472 smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
474 memset(&input, 0, sizeof(input));
475 input.clk_id = SMU11_SYSPLL0_ECLK_ID;
476 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
477 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
480 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
485 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
486 smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
488 memset(&input, 0, sizeof(input));
489 input.clk_id = SMU11_SYSPLL0_VCLK_ID;
490 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
491 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
494 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
499 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
500 smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
502 memset(&input, 0, sizeof(input));
503 input.clk_id = SMU11_SYSPLL0_DCLK_ID;
504 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
505 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
508 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
513 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
514 smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
/* Tell the SMU where the driver-allocated memory pool lives: first the
 * CPU virtual address (split into high/low 32-bit halves), then the MC
 * (GPU) address and the pool size.  No-op if the pool was never set up.
 */
519 static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
521 struct smu_table_context *smu_table = &smu->smu_table;
522 struct smu_table *memory_pool = &smu_table->memory_pool;
525 uint32_t address_low, address_high;
527 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
530 address = (uintptr_t)memory_pool->cpu_addr;
531 address_high = (uint32_t)upper_32_bits(address);
532 address_low = (uint32_t)lower_32_bits(address);
534 ret = smu_send_smc_msg_with_param(smu,
535 SMU_MSG_SetSystemVirtualDramAddrHigh,
539 ret = smu_send_smc_msg_with_param(smu,
540 SMU_MSG_SetSystemVirtualDramAddrLow,
545 address = memory_pool->mc_address;
546 address_high = (uint32_t)upper_32_bits(address);
547 address_low = (uint32_t)lower_32_bits(address);
549 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
553 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
557 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
558 (uint32_t)memory_pool->size);
/* Thin wrapper: delegate powerplay-table validation to the ASIC hook. */
565 static int smu_v11_0_check_pptable(struct smu_context *smu)
569 ret = smu_check_powerplay_table(smu);
/* Allocate the driver copy of the PPTable (once) and let the ASIC
 * hooks store and then append to it.
 */
573 static int smu_v11_0_parse_pptable(struct smu_context *smu)
577 struct smu_table_context *table_context = &smu->smu_table;
579 if (table_context->driver_pptable)
582 table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
584 if (!table_context->driver_pptable)
587 ret = smu_store_powerplay_table(smu);
591 ret = smu_append_powerplay_table(smu);
/* Thin wrapper: build the default DPM table via the ASIC hook. */
596 static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
600 ret = smu_set_default_dpm_table(smu);
/* Upload (driver -> SMU, hence 'true') the driver PPTable. */
605 static int smu_v11_0_write_pptable(struct smu_context *smu)
607 struct smu_table_context *table_context = &smu->smu_table;
610 ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
/* Upload the CPU-side watermarks table buffer to the SMU. */
615 static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
617 return smu_update_table(smu, TABLE_WATERMARKS,
618 smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
/* Program the minimum deep-sleep DCEFCLK (clk in 10 kHz units as used
 * by callers below).
 */
621 static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
625 ret = smu_send_smc_msg_with_param(smu,
626 SMU_MSG_SetMinDeepSleepDcefclk, clk);
628 pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
/* Use the VBIOS boot-up DCEFCLK (stored in 10 kHz, hence /100 to MHz)
 * as the deep-sleep floor.
 */
633 static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
635 struct smu_table_context *table_context = &smu->smu_table;
640 return smu_set_deep_sleep_dcefclk(smu,
641 table_context->boot_values.dcefclk / 100);
/* Tell the SMU where the PM status log (tools) table lives, if it was
 * allocated, by sending the MC address in two 32-bit halves.
 */
644 static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
647 struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
649 if (tool_table->mc_address) {
650 ret = smu_send_smc_msg_with_param(smu,
651 SMU_MSG_SetToolsDramAddrHigh,
652 upper_32_bits(tool_table->mc_address));
654 ret = smu_send_smc_msg_with_param(smu,
655 SMU_MSG_SetToolsDramAddrLow,
656 lower_32_bits(tool_table->mc_address));
/* Initialize display bookkeeping on the SMU side: report zero active
 * displays at startup.
 */
662 static int smu_v11_0_init_display(struct smu_context *smu)
665 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
/* Enable or disable a single SMU feature by id (0..63) via the
 * low/high 32-bit feature-mask messages.
 *
 * Fixes vs. the previous revision:
 *  - 'feature_id >= 0' was a tautology for an unsigned parameter;
 *  - the old bounds (< 31, > 31 && < 63) silently skipped ids 31 and 63;
 *  - '(1 << feature_id)' for ids >= 32 shifted past the width of int,
 *    which is undefined behavior — the high word must be addressed with
 *    a bit index relative to 32.
 */
669 static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
671 uint32_t feature_low = 0, feature_high = 0;
674 if (feature_id < 32)
675 feature_low = (1U << feature_id);
676 else if (feature_id < 64)
677 feature_high = (1U << (feature_id - 32));
682 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
686 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
692 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
696 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
/* Push the driver's allowed-feature bitmap (64 bits, split high/low)
 * to the SMU, holding the feature mutex for the whole transaction.
 */
706 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
708 struct smu_feature *feature = &smu->smu_feature;
710 uint32_t feature_mask[2];
712 mutex_lock(&feature->mutex);
713 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
716 bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
718 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
723 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
/* NOTE(review): error paths between the sends are elided in this
 * extract — confirm the mutex is released on every path. */
729 mutex_unlock(&feature->mutex);
/* Read the SMU's currently-enabled feature mask into feature_mask[0]
 * (low 32 bits) and feature_mask[1] (high 32 bits); requires num >= 2.
 */
733 static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
734 uint32_t *feature_mask, uint32_t num)
736 uint32_t feature_mask_high = 0, feature_mask_low = 0;
739 if (!feature_mask || num < 2)
742 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
745 ret = smu_read_smc_arg(smu, &feature_mask_high);
749 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
752 ret = smu_read_smc_arg(smu, &feature_mask_low);
756 feature_mask[0] = feature_mask_low;
757 feature_mask[1] = feature_mask_high;
/* True when any of the core DPM features (SMC_DPM_FEATURE mask) is
 * enabled on the SMU.
 * NOTE(review): the return value of the mask query is not checked in
 * the visible lines — on failure feature_mask would be uninitialized.
 */
762 static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
765 uint32_t feature_mask[2];
766 unsigned long feature_enabled;
767 ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
768 feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
769 ((uint64_t)feature_mask[1] << 32));
770 return !!(feature_enabled & SMC_DPM_FEATURE);
/* Enable or disable all SMU features in one message, then refresh the
 * driver's enabled/supported bitmaps from the SMU's reported mask.
 */
773 static int smu_v11_0_system_features_control(struct smu_context *smu,
776 struct smu_feature *feature = &smu->smu_feature;
777 uint32_t feature_mask[2];
780 ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
781 SMU_MSG_DisableAllSmuFeatures));
784 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
/* feature_mask is already an array; '&feature_mask' works but relies
 * on the array-vs-pointer pun — same address either way. */
788 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
789 feature->feature_num);
790 bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
791 feature->feature_num);
/* Enable UCLK fast switching for display changes when UCLK DPM is on. */
796 static int smu_v11_0_notify_display_change(struct smu_context *smu)
800 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
801 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
/* Fetch the max sustainable frequency for one clock domain: first ask
 * for the DC-mode max; if that comes back zero, fall back to the AC
 * max (see the comment on the original line 826).
 */
807 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
808 PPCLK_e clock_select)
812 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
815 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
819 ret = smu_read_smc_arg(smu, clock);
826 /* if DC limit is zero, return AC limit */
827 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
830 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
834 ret = smu_read_smc_arg(smu, clock);
/* Build the max-sustainable-clocks table: seed uclock/socclk/dcefclk
 * from VBIOS boot values (10 kHz -> MHz via /100), default the display
 * clocks to "unlimited" (0xFFFFFFFF), then override each from the SMU
 * where the matching DPM feature is enabled.  Finally clamp UCLK to
 * SOCCLK since memory cannot sustainably outrun the SOC fabric.
 * NOTE(review): no NULL check is visible after the kzalloc — confirm
 * against the full source.
 */
839 static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
841 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
844 max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
846 smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
848 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
849 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
850 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
851 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
852 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
853 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
855 if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
856 ret = smu_v11_0_get_max_sustainable_clock(smu,
857 &(max_sustainable_clocks->uclock),
860 pr_err("[%s] failed to get max UCLK from SMC!",
866 if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
867 ret = smu_v11_0_get_max_sustainable_clock(smu,
868 &(max_sustainable_clocks->soc_clock),
871 pr_err("[%s] failed to get max SOCCLK from SMC!",
877 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
878 ret = smu_v11_0_get_max_sustainable_clock(smu,
879 &(max_sustainable_clocks->dcef_clock),
882 pr_err("[%s] failed to get max DCEFCLK from SMC!",
887 ret = smu_v11_0_get_max_sustainable_clock(smu,
888 &(max_sustainable_clocks->display_clock),
891 pr_err("[%s] failed to get max DISPCLK from SMC!",
895 ret = smu_v11_0_get_max_sustainable_clock(smu,
896 &(max_sustainable_clocks->phy_clock),
899 pr_err("[%s] failed to get max PHYCLK from SMC!",
903 ret = smu_v11_0_get_max_sustainable_clock(smu,
904 &(max_sustainable_clocks->pixel_clock),
907 pr_err("[%s] failed to get max PIXCLK from SMC!",
913 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
914 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
/* Report the power (PPT) limit.  One path computes the default limit
 * scaled by the overdrive TDP percentage under smu->mutex; the other
 * queries the live AC PPT limit from the SMU and caches it in
 * smu->power_limit.  NOTE(review): the branch that selects between the
 * two paths is missing from this extract.
 */
919 static int smu_v11_0_get_power_limit(struct smu_context *smu,
926 mutex_lock(&smu->mutex);
927 *limit = smu->default_power_limit;
928 if (smu->od_enabled) {
/* TDPODLimit is a percentage on top of 100; the '/ 100' presumably
 * follows on an elided line. */
929 *limit *= (100 + smu->smu_table.TDPODLimit);
932 mutex_unlock(&smu->mutex);
934 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
935 POWER_SOURCE_AC << 16);
937 pr_err("[%s] get PPT limit failed!", __func__);
940 smu_read_smc_arg(smu, limit);
941 smu->power_limit = *limit;
/* Set the PPT power limit to n watts (0 presumably selects the default
 * — the condition line is elided).  The ceiling is the default limit,
 * raised by the overdrive TDP percentage when OD is enabled.  The
 * message is only sent when the PPT feature is active.
 */
947 static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
949 uint32_t max_power_limit;
953 n = smu->default_power_limit;
955 max_power_limit = smu->default_power_limit;
957 if (smu->od_enabled) {
958 max_power_limit *= (100 + smu->smu_table.TDPODLimit);
959 max_power_limit /= 100;
962 if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
963 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
965 pr_err("[%s] Set power limit Failed!", __func__);
/* Read the current frequency of one DPM clock domain; the clock id is
 * encoded in the upper 16 bits of the message parameter.
 */
972 static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
977 if (clk_id >= PPCLK_COUNT || !value)
980 ret = smu_send_smc_msg_with_param(smu,
981 SMU_MSG_GetDpmClockFreq, (clk_id << 16));
985 ret = smu_read_smc_arg(smu, &freq);
/* Start from the SMU7 default thermal policy and override the maximum
 * with the pptable's software shutdown temperature (in millidegrees).
 */
995 static int smu_v11_0_get_thermal_range(struct smu_context *smu,
996 struct PP_TemperatureRange *range)
998 memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
1000 range->max = smu->smu_table.software_shutdown_temp *
1001 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
/* Program the THM thermal interrupt thresholds: clamp the requested
 * range to the hardware alert window, write the high/low trip points
 * (in whole degrees) and mask the hard trigger.
 */
1006 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1007 struct PP_TemperatureRange *range)
1009 struct amdgpu_device *adev = smu->adev;
1010 int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
1011 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1012 int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
1013 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1016 if (low < range->min)
1018 if (high > range->max)
1024 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1025 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1026 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1027 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1028 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1029 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1031 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
/* Arm the thermal interrupt by setting the high/low/trigger clear
 * bits in THM_THERMAL_INT_ENA.
 */
1036 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1038 struct amdgpu_device *adev = smu->adev;
1041 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1042 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1043 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1045 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
/* Forward the pptable's fan target temperature to the SMU. */
1050 static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
1053 struct smu_table_context *table_context = &smu->smu_table;
1054 PPTable_t *pptable = table_context->driver_pptable;
1056 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
1057 (uint32_t)pptable->FanTargetTemperature);
/* Kick off thermal control: compute the temperature range, and when a
 * thermal controller is present, program the trip points, enable the
 * alert interrupt and set the fan table.  The resulting range is
 * published in adev->pm.dpm.thermal for the rest of the driver.
 */
1062 static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1065 struct PP_TemperatureRange range;
1066 struct amdgpu_device *adev = smu->adev;
1068 smu_v11_0_get_thermal_range(smu, &range);
1070 if (smu->smu_table.thermal_controller_type) {
1071 ret = smu_v11_0_set_thermal_range(smu, &range);
1075 ret = smu_v11_0_enable_thermal_alert(smu);
1078 ret = smu_v11_0_set_thermal_fan_table(smu);
1083 adev->pm.dpm.thermal.min_temp = range.min;
1084 adev->pm.dpm.thermal.max_temp = range.max;
/* Pull the metrics table from the SMU (direction 'false' = read) and
 * report the average GFX activity percentage.
 */
1089 static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
1093 SmuMetrics_t metrics;
1098 ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1102 *value = metrics.AverageGfxActivity;
/* Read the current GPU temperature from the THM CTF status field,
 * mask to 9 bits, and scale to millidegrees Celsius.
 */
1107 static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
1109 struct amdgpu_device *adev = smu->adev;
1115 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
1116 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
1117 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
1119 temp = temp & 0x1ff;
1120 temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
/* Report current socket power from the metrics table, shifted left 8
 * to match the 24.8 fixed-point convention used by the sensor API.
 */
1127 static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
1130 SmuMetrics_t metrics;
1135 ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
1139 *value = metrics.CurrSocketPower << 8;
1144 static uint16_t convert_to_vddc(uint8_t vid)
1146 return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
/* Read the GFX rail voltage: extract the VDDCOR VID from the SVI0
 * telemetry plane register and convert it to a voltage value.
 */
1149 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1151 struct amdgpu_device *adev = smu->adev;
1152 uint32_t vdd = 0, val_vid = 0;
1156 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1157 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1158 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1160 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
/* Sensor dispatch for the amd_pp sensor interface: route each sensor
 * id to the matching query helper; unknown sensors fall through to the
 * common handler.
 */
1168 static int smu_v11_0_read_sensor(struct smu_context *smu,
1169 enum amd_pp_sensors sensor,
1170 void *data, uint32_t *size)
1172 struct smu_table_context *table_context = &smu->smu_table;
1173 PPTable_t *pptable = table_context->driver_pptable;
1176 case AMDGPU_PP_SENSOR_GPU_LOAD:
1177 ret = smu_v11_0_get_current_activity_percent(smu,
1181 case AMDGPU_PP_SENSOR_GFX_MCLK:
1182 ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
1185 case AMDGPU_PP_SENSOR_GFX_SCLK:
1186 ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
1189 case AMDGPU_PP_SENSOR_GPU_TEMP:
1190 ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
1193 case AMDGPU_PP_SENSOR_GPU_POWER:
1194 ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
1197 case AMDGPU_PP_SENSOR_VDDGFX:
1198 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1201 case AMDGPU_PP_SENSOR_UVD_POWER:
1202 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
1205 case AMDGPU_PP_SENSOR_VCE_POWER:
1206 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
1209 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1210 *(uint32_t *)data = 0;
1213 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1214 *(uint32_t *)data = pptable->FanMaximumRpm;
/* default: delegate to the shared sensor implementation. */
1218 ret = smu_common_read_sensor(smu, sensor, data, size);
/* Honor a display clock request: translate the amd_pp clock type to a
 * PPCLK id (only when DCEFCLK DPM is enabled) and set a hard minimum
 * frequency, with the clock id in the upper 16 bits of the parameter.
 */
1229 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1230 struct pp_display_clock_request
1233 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1235 PPCLK_e clk_select = 0;
1236 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1238 if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
1240 case amd_pp_dcef_clock:
1241 clk_select = PPCLK_DCEFCLK;
1243 case amd_pp_disp_clock:
1244 clk_select = PPCLK_DISPCLK;
1246 case amd_pp_pixel_clock:
1247 clk_select = PPCLK_PIXCLK;
1249 case amd_pp_phy_clock:
1250 clk_select = PPCLK_PHYCLK;
1253 pr_info("[%s] Invalid Clock Type!", __func__);
1261 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1262 (clk_select << 16) | clk_freq);
/* Fill the SMU Watermarks_t table from the DM-provided clock ranges.
 * Row [1] holds the DMIF (dcfclk-keyed) sets and row [0] the MCIF
 * (socclk-keyed) sets; each supports at most 4 entries.  Values are
 * converted kHz -> (truncated to 16-bit, little-endian); the divisor
 * lines are elided in this extract.
 */
1269 static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
1270 Watermarks_t *table, struct
1271 dm_pp_wm_sets_with_clock_ranges_soc15
1276 if (!table || !clock_ranges)
1279 if (clock_ranges->num_wm_dmif_sets > 4 ||
1280 clock_ranges->num_wm_mcif_sets > 4)
1283 for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1284 table->WatermarkRow[1][i].MinClock =
1285 cpu_to_le16((uint16_t)
1286 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1288 table->WatermarkRow[1][i].MaxClock =
1289 cpu_to_le16((uint16_t)
1290 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1292 table->WatermarkRow[1][i].MinUclk =
1293 cpu_to_le16((uint16_t)
1294 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1296 table->WatermarkRow[1][i].MaxUclk =
1297 cpu_to_le16((uint16_t)
1298 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1300 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1301 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
1304 for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1305 table->WatermarkRow[0][i].MinClock =
1306 cpu_to_le16((uint16_t)
1307 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1309 table->WatermarkRow[0][i].MaxClock =
1310 cpu_to_le16((uint16_t)
1311 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1313 table->WatermarkRow[0][i].MinUclk =
1314 cpu_to_le16((uint16_t)
1315 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1317 table->WatermarkRow[0][i].MaxUclk =
1318 cpu_to_le16((uint16_t)
1319 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1321 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1322 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
/* Populate the CPU-side watermarks buffer (only when watermarks are
 * not disabled and both DCEFCLK and SOCCLK DPM are active) and mark
 * the bitmap so a later flush uploads the table to the SMU.
 */
1329 smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1330 dm_pp_wm_sets_with_clock_ranges_soc15
1334 struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
1335 Watermarks_t *table = watermarks->cpu_addr;
1337 if (!smu->disable_watermark &&
1338 smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
1339 smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
1340 smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
1341 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1342 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
/* Query either the max or the min DPM frequency for a clock domain
 * into *clock.  NOTE(review): both branches of the max/min selection
 * appear here because the selecting 'if (max)' lines are elided.
 */
1348 static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
1350 PPCLK_e clock_select,
1356 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
1357 (clock_select << 16));
1359 pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
1362 smu_read_smc_arg(smu, clock);
1364 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
1365 (clock_select << 16));
1367 pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
1370 smu_read_smc_arg(smu, clock);
/* Report the min (low=true) or max GFX clock in 10 kHz units (the SMU
 * returns MHz, hence * 100).  Returns 0 when GFXCLK DPM is off or a
 * query fails.
 */
1376 static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
1381 if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
1382 pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
1387 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
1389 pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
1393 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
1395 pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
1400 return (gfx_clk * 100);
1403 static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
1408 if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
1409 pr_err("[GetMclks]: memclk dpm not enabled!\n");
1414 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
1416 pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
1420 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_GFXCLK, true);
1422 pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
1427 return (mem_clk * 100);
1430 static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
1433 struct smu_table_context *table_context = &smu->smu_table;
1437 if (table_context->overdrive_table)
1440 table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
1442 if (!table_context->overdrive_table)
1445 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
1447 pr_err("Failed to export over drive table!\n");
1451 smu_set_default_od8_settings(smu);
1454 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
1456 pr_err("Failed to import over drive table!\n");
1463 static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
1465 int pplib_workload = 0;
1467 switch (power_profile) {
1468 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
1469 pplib_workload = WORKLOAD_DEFAULT_BIT;
1471 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1472 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1474 case PP_SMC_POWER_PROFILE_POWERSAVING:
1475 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
1477 case PP_SMC_POWER_PROFILE_VIDEO:
1478 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1480 case PP_SMC_POWER_PROFILE_VR:
1481 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1483 case PP_SMC_POWER_PROFILE_COMPUTE:
1484 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1486 case PP_SMC_POWER_PROFILE_CUSTOM:
1487 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1491 return pplib_workload;
1494 static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
1496 DpmActivityMonitorCoeffInt_t activity_monitor;
1497 uint32_t i, size = 0;
1498 uint16_t workload_type = 0;
1499 static const char *profile_name[] = {
1507 static const char *title[] = {
1508 "PROFILE_INDEX(NAME)",
1512 "MinActiveFreqType",
1517 "PD_Data_error_coeff",
1518 "PD_Data_error_rate_coeff"};
1524 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1525 title[0], title[1], title[2], title[3], title[4], title[5],
1526 title[6], title[7], title[8], title[9], title[10]);
1528 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1529 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1530 workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
1531 result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1532 workload_type, &activity_monitor, false);
1534 pr_err("[%s] Failed to get activity monitor!", __func__);
1538 size += sprintf(buf + size, "%2d %14s%s:\n",
1539 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1541 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1545 activity_monitor.Gfx_FPS,
1546 activity_monitor.Gfx_UseRlcBusy,
1547 activity_monitor.Gfx_MinActiveFreqType,
1548 activity_monitor.Gfx_MinActiveFreq,
1549 activity_monitor.Gfx_BoosterFreqType,
1550 activity_monitor.Gfx_BoosterFreq,
1551 activity_monitor.Gfx_PD_Data_limit_c,
1552 activity_monitor.Gfx_PD_Data_error_coeff,
1553 activity_monitor.Gfx_PD_Data_error_rate_coeff);
1555 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1559 activity_monitor.Soc_FPS,
1560 activity_monitor.Soc_UseRlcBusy,
1561 activity_monitor.Soc_MinActiveFreqType,
1562 activity_monitor.Soc_MinActiveFreq,
1563 activity_monitor.Soc_BoosterFreqType,
1564 activity_monitor.Soc_BoosterFreq,
1565 activity_monitor.Soc_PD_Data_limit_c,
1566 activity_monitor.Soc_PD_Data_error_coeff,
1567 activity_monitor.Soc_PD_Data_error_rate_coeff);
1569 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1573 activity_monitor.Mem_FPS,
1574 activity_monitor.Mem_UseRlcBusy,
1575 activity_monitor.Mem_MinActiveFreqType,
1576 activity_monitor.Mem_MinActiveFreq,
1577 activity_monitor.Mem_BoosterFreqType,
1578 activity_monitor.Mem_BoosterFreq,
1579 activity_monitor.Mem_PD_Data_limit_c,
1580 activity_monitor.Mem_PD_Data_error_coeff,
1581 activity_monitor.Mem_PD_Data_error_rate_coeff);
1583 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1587 activity_monitor.Fclk_FPS,
1588 activity_monitor.Fclk_UseRlcBusy,
1589 activity_monitor.Fclk_MinActiveFreqType,
1590 activity_monitor.Fclk_MinActiveFreq,
1591 activity_monitor.Fclk_BoosterFreqType,
1592 activity_monitor.Fclk_BoosterFreq,
1593 activity_monitor.Fclk_PD_Data_limit_c,
1594 activity_monitor.Fclk_PD_Data_error_coeff,
1595 activity_monitor.Fclk_PD_Data_error_rate_coeff);
1601 static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1603 DpmActivityMonitorCoeffInt_t activity_monitor;
1604 int workload_type = 0, ret = 0;
1606 smu->power_profile_mode = input[size];
1608 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1609 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1613 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1614 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1615 WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
1617 pr_err("[%s] Failed to get activity monitor!", __func__);
1622 case 0: /* Gfxclk */
1623 activity_monitor.Gfx_FPS = input[1];
1624 activity_monitor.Gfx_UseRlcBusy = input[2];
1625 activity_monitor.Gfx_MinActiveFreqType = input[3];
1626 activity_monitor.Gfx_MinActiveFreq = input[4];
1627 activity_monitor.Gfx_BoosterFreqType = input[5];
1628 activity_monitor.Gfx_BoosterFreq = input[6];
1629 activity_monitor.Gfx_PD_Data_limit_c = input[7];
1630 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1631 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1633 case 1: /* Socclk */
1634 activity_monitor.Soc_FPS = input[1];
1635 activity_monitor.Soc_UseRlcBusy = input[2];
1636 activity_monitor.Soc_MinActiveFreqType = input[3];
1637 activity_monitor.Soc_MinActiveFreq = input[4];
1638 activity_monitor.Soc_BoosterFreqType = input[5];
1639 activity_monitor.Soc_BoosterFreq = input[6];
1640 activity_monitor.Soc_PD_Data_limit_c = input[7];
1641 activity_monitor.Soc_PD_Data_error_coeff = input[8];
1642 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1645 activity_monitor.Mem_FPS = input[1];
1646 activity_monitor.Mem_UseRlcBusy = input[2];
1647 activity_monitor.Mem_MinActiveFreqType = input[3];
1648 activity_monitor.Mem_MinActiveFreq = input[4];
1649 activity_monitor.Mem_BoosterFreqType = input[5];
1650 activity_monitor.Mem_BoosterFreq = input[6];
1651 activity_monitor.Mem_PD_Data_limit_c = input[7];
1652 activity_monitor.Mem_PD_Data_error_coeff = input[8];
1653 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1656 activity_monitor.Fclk_FPS = input[1];
1657 activity_monitor.Fclk_UseRlcBusy = input[2];
1658 activity_monitor.Fclk_MinActiveFreqType = input[3];
1659 activity_monitor.Fclk_MinActiveFreq = input[4];
1660 activity_monitor.Fclk_BoosterFreqType = input[5];
1661 activity_monitor.Fclk_BoosterFreq = input[6];
1662 activity_monitor.Fclk_PD_Data_limit_c = input[7];
1663 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
1664 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
1668 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1669 WORKLOAD_PPLIB_COMPUTE_BIT, &activity_monitor, true);
1671 pr_err("[%s] Failed to set activity monitor!", __func__);
1676 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1678 smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
1679 smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1680 1 << workload_type);
1685 static int smu_v11_0_update_od8_settings(struct smu_context *smu,
1689 struct smu_table_context *table_context = &smu->smu_table;
1692 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1693 table_context->overdrive_table, false);
1695 pr_err("Failed to export over drive table!\n");
1699 smu_update_specified_od8_value(smu, index, value);
1701 ret = smu_update_table(smu, TABLE_OVERDRIVE,
1702 table_context->overdrive_table, true);
1704 pr_err("Failed to import over drive table!\n");
1711 static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
1713 if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
1716 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
1719 return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
1722 static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
1724 if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
1727 if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
1730 return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
1733 static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1734 uint32_t *current_rpm)
1738 ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1741 pr_err("Attempt to get current RPM from SMC Failed!\n");
1745 smu_read_smc_arg(smu, current_rpm);
1751 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1753 if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
1754 return AMD_FAN_CTRL_MANUAL;
1756 return AMD_FAN_CTRL_AUTO;
1760 smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
1764 uint32_t percent = 0;
1765 uint32_t current_rpm;
1766 PPTable_t *pptable = smu->smu_table.driver_pptable;
1768 ret = smu_v11_0_get_current_rpm(smu, ¤t_rpm);
1769 percent = current_rpm * 100 / pptable->FanMaximumRpm;
1770 *speed = percent > 100 ? 100 : percent;
1776 smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1780 if (smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
1783 ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
1785 pr_err("[%s]%s smc FAN CONTROL feature failed!",
1786 __func__, (start ? "Start" : "Stop"));
1792 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1794 struct amdgpu_device *adev = smu->adev;
1796 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1797 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1798 CG_FDO_CTRL2, TMIN, 0));
1799 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1800 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1801 CG_FDO_CTRL2, FDO_PWM_MODE, mode));
1807 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1809 struct amdgpu_device *adev = smu->adev;
1818 if (smu_v11_0_smc_fan_control(smu, stop))
1820 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1821 CG_FDO_CTRL1, FMAX_DUTY100);
1825 tmp64 = (uint64_t)speed * duty100;
1827 duty = (uint32_t)tmp64;
1829 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1830 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1831 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1833 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1837 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1845 case AMD_FAN_CTRL_NONE:
1846 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1848 case AMD_FAN_CTRL_MANUAL:
1849 ret = smu_v11_0_smc_fan_control(smu, stop);
1851 case AMD_FAN_CTRL_AUTO:
1852 ret = smu_v11_0_smc_fan_control(smu, start);
1859 pr_err("[%s]Set fan control mode failed!", __func__);
1866 static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1869 struct amdgpu_device *adev = smu->adev;
1871 uint32_t tach_period, crystal_clock_freq;
1877 mutex_lock(&(smu->mutex));
1878 ret = smu_v11_0_smc_fan_control(smu, stop);
1880 goto set_fan_speed_rpm_failed;
1882 crystal_clock_freq = amdgpu_asic_get_xclk(adev);
1883 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
1884 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
1885 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
1886 CG_TACH_CTRL, TARGET_PERIOD,
1889 ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
1891 set_fan_speed_rpm_failed:
1892 mutex_unlock(&(smu->mutex));
1896 static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
1900 mutex_lock(&(smu->mutex));
1901 ret = smu_send_smc_msg_with_param(smu,
1902 SMU_MSG_SetXgmiMode,
1903 pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
1904 mutex_unlock(&(smu->mutex));
1908 static const struct smu_funcs smu_v11_0_funcs = {
1909 .init_microcode = smu_v11_0_init_microcode,
1910 .load_microcode = smu_v11_0_load_microcode,
1911 .check_fw_status = smu_v11_0_check_fw_status,
1912 .check_fw_version = smu_v11_0_check_fw_version,
1913 .send_smc_msg = smu_v11_0_send_msg,
1914 .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
1915 .read_smc_arg = smu_v11_0_read_arg,
1916 .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
1917 .init_smc_tables = smu_v11_0_init_smc_tables,
1918 .fini_smc_tables = smu_v11_0_fini_smc_tables,
1919 .init_power = smu_v11_0_init_power,
1920 .fini_power = smu_v11_0_fini_power,
1921 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
1922 .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
1923 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
1924 .check_pptable = smu_v11_0_check_pptable,
1925 .parse_pptable = smu_v11_0_parse_pptable,
1926 .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
1927 .write_pptable = smu_v11_0_write_pptable,
1928 .write_watermarks_table = smu_v11_0_write_watermarks_table,
1929 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
1930 .set_tool_table_location = smu_v11_0_set_tool_table_location,
1931 .init_display = smu_v11_0_init_display,
1932 .set_allowed_mask = smu_v11_0_set_allowed_mask,
1933 .get_enabled_mask = smu_v11_0_get_enabled_mask,
1934 .is_dpm_running = smu_v11_0_is_dpm_running,
1935 .system_features_control = smu_v11_0_system_features_control,
1936 .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
1937 .notify_display_change = smu_v11_0_notify_display_change,
1938 .get_power_limit = smu_v11_0_get_power_limit,
1939 .set_power_limit = smu_v11_0_set_power_limit,
1940 .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
1941 .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
1942 .start_thermal_control = smu_v11_0_start_thermal_control,
1943 .read_sensor = smu_v11_0_read_sensor,
1944 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
1945 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
1946 .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
1947 .get_sclk = smu_v11_0_dpm_get_sclk,
1948 .get_mclk = smu_v11_0_dpm_get_mclk,
1949 .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
1950 .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
1951 .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
1952 .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
1953 .update_od8_settings = smu_v11_0_update_od8_settings,
1954 .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
1955 .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
1956 .get_current_rpm = smu_v11_0_get_current_rpm,
1957 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
1958 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
1959 .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
1960 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
1961 .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
1962 .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
1965 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
1967 struct amdgpu_device *adev = smu->adev;
1969 smu->funcs = &smu_v11_0_funcs;
1970 switch (adev->asic_type) {
1972 vega20_set_ppt_funcs(smu);
1975 pr_warn("Unknown asic for smu11\n");