/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

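/*
 * SMU message interface (per the helpers below): the driver writes the
 * message argument to MP1_SMN_C2PMSG_82 and the message index to
 * MP1_SMN_C2PMSG_66, then polls MP1_SMN_C2PMSG_90 until the firmware posts
 * a response; a response value of 0x1 means success, anything else is
 * treated as -EIO.
 */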
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ? 0 : -EIO;

		udelay(1);
	}

	/* timeout means wrong logic */
	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d), response %#x\n",
		       smu_get_message_name(smu, msg), index, ret);

	return ret;
}

int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d) \tparam: 0x%08x, response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed to send message: %10s (%d) \tparam: 0x%08x, response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}

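/*
 * Request the per-ASIC SMC firmware image (amdgpu/<chip>_smc.bin). When
 * firmware is loaded through the PSP, the image is also registered in
 * adev->firmware.ucode[] so the PSP can upload it.
 */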
int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

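/*
 * Direct (non-PSP) load path: copy the SMC ucode into MP1 SRAM through the
 * PCIe index/data interface, pulse the MP1 public reset, then poll the MP1
 * FIRMWARE_FLAGS register until the firmware reports interrupts enabled.
 */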
int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_VEGA20:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
		break;
	case CHIP_ARCTURUS:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI14:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	default:
		pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. An if_version mismatch is not critical, as the firmware is
	 *    designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations, but those are
	 *    visible only with the paired driver.
	 * Considering the above, we just print a warning message instead of
	 * halting driver load.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}

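/*
 * Soft powerplay tables can be embedded in the SMC firmware image. A v2.0
 * header carries a single table; a v2.1 header carries an array of entries
 * selected by pptable_id.
 */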
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		pr_info("use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	smu_table->tables = NULL;
	smu_table->metrics_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

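/*
 * Pull the bootup clocks and voltages out of the VBIOS firmware_info atom
 * table (format revision 3, content revision 1/2/3). The VBIOS reports
 * clocks in 10 kHz units and voltages in mV.
 */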
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}

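/*
 * Query bootup SOCCLK/DCEFCLK/ECLK/VCLK/DCLK (and FCLK on newer
 * firmware_info revisions) through the VBIOS getsmuclockinfo command. The
 * output is returned in place of the input buffer; the frequency in Hz is
 * converted to the 10 kHz units used by boot_values.
 */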
int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2)) {
		memset(&input, 0, sizeof(input));
		input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
		input.syspll_id = SMU11_SYSPLL1_2_ID;
		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
						    getsmuclockinfo);

		ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
						(uint32_t *)&input);
		if (ret)
			return -EINVAL;

		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
		smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
	}

	return 0;
}

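/*
 * Hand the driver-allocated memory pool to the SMU: the CPU virtual
 * address is passed via the SetSystemVirtualDramAddr* messages and the MC
 * (GPU) address plus size via the DramLogSetDram* messages.
 */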
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrLow,
				lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}

int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}

int smu_v11_0_get_enabled_mask(struct smu_context *smu,
			       uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

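/*
 * Ask the SMU for the max sustainable (DC mode) frequency of a clock
 * domain; the clock index is encoded in the upper 16 bits of the message
 * parameter. If the DC limit reads back as zero, the AC limit is returned
 * instead.
 */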
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	if (!max_sustainable_clocks)
		return -ENOMEM;

	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

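/*
 * The maximum power limit comes from the pptable (falling back to the
 * first-read value) and, when overdrive is enabled, is scaled up by the
 * ODSETTING_POWERPERCENTAGE entry of the overdrive table.
 */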
uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
{
	uint32_t od_limit, max_power_limit;
	struct smu_11_0_powerplay_table *powerplay_table = NULL;
	struct smu_table_context *table_context = &smu->smu_table;

	powerplay_table = table_context->power_play_table;

	max_power_limit = smu_get_pptable_power_limit(smu);

	if (!max_power_limit) {
		/* If we couldn't get the table limit, fall back on the first-read value */
		if (!smu->default_power_limit)
			smu->default_power_limit = smu->power_limit;
		max_power_limit = smu->default_power_limit;
	}

	if (smu->od_enabled) {
		od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

		max_power_limit *= (100 + od_limit);
		max_power_limit /= 100;
	}

	return max_power_limit;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;
	uint32_t max_power_limit;

	max_power_limit = smu_v11_0_get_max_power_limit(smu);

	if (n > max_power_limit) {
		pr_err("New power limit (%d) is over the max allowed %d\n",
		       n, max_power_limit);
		return -EINVAL;
	}

	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		pr_err("Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}

int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
				   enum smu_clk_type clk_id,
				   uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/* if the GetDpmClockFreq message is not supported, get the current clock from the SmuMetrics table */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}

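/*
 * Program the THM alert thresholds. range.min/max appear to be in
 * millidegrees Celsius (SMU_TEMPERATURE_UNITS_PER_CENTIGRADES per degree);
 * DIG_THERM_INTH/INTL take whole degrees.
 */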
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
		  range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
		   range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}

int smu_v11_0_stop_thermal_control(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

	return 0;
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int smu_v11_0_read_sensor(struct smu_context *smu,
			  enum amd_pp_sensors sensor,
			  void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s] Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

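/*
 * Static RPM control: fan control is taken away from the SMU, the
 * requested RPM is converted to a tach period derived from the crystal
 * (xclk) frequency, and CG_TACH_CTRL is programmed directly.
 */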
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("unknown thermal irq src id (%d) detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already registered */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);

	return ret;
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

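/*
 * BACO is usable only when the platform reports support, the BACO feature
 * is enabled in the SMU feature mask, and the board strap reports PX
 * (power express) capability.
 */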
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t bif_doorbell_intr_cntl;
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (state == SMU_BACO_STATE_ENTER) {
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						DOORBELL_INTERRUPT_DISABLE, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (!ras || !ras->supported) {
			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
			data |= 0x80000000;
			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
		} else {
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						DOORBELL_INTERRUPT_DISABLE, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
	}

	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);

	return ret;
}

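/*
 * BACO-based GPU reset: arm the D3 sequence, enter BACO, give the hardware
 * a moment to settle, then exit BACO.
 */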
int smu_v11_0_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	if (ret)
		return ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
					  uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);

	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);

	return ret;
}

int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (initialize) {
		if (table_context->overdrive_table)
			return -EINVAL;
		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
		if (!table_context->overdrive_table)
			return -ENOMEM;
		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
		if (ret) {
			pr_err("Failed to export overdrive table!\n");
			return ret;
		}
	}
	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
	if (ret) {
		pr_err("Failed to import overdrive table!\n");