2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
26 #include "amdgpu_smu.h"
27 #include "smu_internal.h"
28 #include "atomfirmware.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "smu_v12_0.h"
31 #include "soc15_common.h"
33 #include "renoir_ppt.h"
35 #include "asic_reg/mp/mp_12_0_0_offset.h"
36 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
38 #define smnMP1_FIRMWARE_FLAGS 0x3010024
40 #define mmSMUIO_GFX_MISC_CNTL 0x00c8
41 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
42 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
43 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
/*
 * Post a raw message index into the MP1 C2PMSG_66 mailbox register
 * without polling for the firmware's acknowledgement.  Callers that
 * need the response poll C2PMSG_90 via smu_v12_0_wait_for_response().
 * NOTE(review): the parameter list tail, body close and return
 * statement are not visible in this chunk — lines appear to have been
 * dropped during extraction; verify against the full file.
 */
45 static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
48 struct amdgpu_device *adev = smu->adev;
50 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
/*
 * Read the firmware's argument/response payload out of the MP1
 * C2PMSG_82 mailbox register into *arg.
 * NOTE(review): the return statement and closing brace are not visible
 * in this chunk — presumably returns 0; confirm against the full file.
 */
54 static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
56 struct amdgpu_device *adev = smu->adev;
58 *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
/*
 * Busy-poll the MP1 C2PMSG_90 response register until the firmware
 * posts a non-zero value or adev->usec_timeout iterations elapse.
 * A final register value of 0x1 means success (returns 0); any other
 * non-zero response yields -EIO.
 * NOTE(review): the per-iteration delay, loop break and the timeout
 * error return (between the visible lines) are missing from this
 * chunk — extraction artifact; verify against the full file.
 */
62 static int smu_v12_0_wait_for_response(struct smu_context *smu)
64 struct amdgpu_device *adev = smu->adev;
65 uint32_t cur_value, i;
67 for (i = 0; i < adev->usec_timeout; i++) {
68 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
69 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
74 /* timeout means wrong logic */
75 if (i == adev->usec_timeout)
78 return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
/*
 * Send a logical SMU message (no parameter) to the firmware:
 * translate the generic message enum to the ASIC-specific index,
 * drain any pending response, clear the response register, post the
 * index, then wait for and check the firmware's reply.
 * NOTE(review): the negative-index early return and the error-path
 * details around the pr_err() are not visible in this chunk —
 * extraction artifact; verify against the full file.
 */
81 static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
83 struct amdgpu_device *adev = smu->adev;
84 int ret = 0, index = 0;
86 index = smu_msg_get_index(smu, msg);
90 smu_v12_0_wait_for_response(smu);
92 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
94 smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
96 ret = smu_v12_0_wait_for_response(smu);
99 pr_err("Failed to send message 0x%x, response 0x%x\n", index,
/*
 * Send a logical SMU message together with a 32-bit parameter: the
 * parameter goes into C2PMSG_82, the translated message index into
 * C2PMSG_66, and the response is polled in C2PMSG_90.  Note that the
 * pre-send wait result is logged as an error if the previous exchange
 * never completed.
 * NOTE(review): the return type line, negative-index check and the
 * final return are not visible in this chunk — extraction artifact;
 * verify against the full file.
 */
107 smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
110 struct amdgpu_device *adev = smu->adev;
111 int ret = 0, index = 0;
113 index = smu_msg_get_index(smu, msg);
117 ret = smu_v12_0_wait_for_response(smu);
119 pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
122 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
124 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
126 smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
128 ret = smu_v12_0_wait_for_response(smu);
130 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
/*
 * Check whether the MP1 firmware is up by reading its public firmware
 * flags over the PCIE aperture and testing the INTERRUPTS_ENABLED bit.
 * NOTE(review): the return statements for both the set and clear cases
 * are not visible in this chunk — extraction artifact; verify against
 * the full file.
 */
136 static int smu_v12_0_check_fw_status(struct smu_context *smu)
138 struct amdgpu_device *adev = smu->adev;
139 uint32_t mp1_fw_flags;
141 mp1_fw_flags = RREG32_PCIE(MP1_Public |
142 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
144 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
145 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
/*
 * Compare the driver's expected SMU interface version against what the
 * firmware reports.  A mismatch is deliberately non-fatal (see the
 * rationale comment below): the version details are logged and only a
 * warning is emitted.
 * NOTE(review): declarations of ret/smu_major and the early-return on
 * smu_get_smc_version() failure are not visible in this chunk —
 * extraction artifact; verify against the full file.
 */
151 static int smu_v12_0_check_fw_version(struct smu_context *smu)
153 uint32_t if_version = 0xff, smu_version = 0xff;
155 uint8_t smu_minor, smu_debug;
158 ret = smu_get_smc_version(smu, &if_version, &smu_version);
162 smu_major = (smu_version >> 16) & 0xffff;
163 smu_minor = (smu_version >> 8) & 0xff;
164 smu_debug = (smu_version >> 0) & 0xff;
167 * 1. if_version mismatch is not critical as our fw is designed
168 * to be backward compatible.
169 * 2. New fw usually brings some optimizations. But that's visible
170 * only on the paired driver.
171 * Considering above, we just leave user a warning message instead
172 * of halt driver loading.
174 if (if_version != smu->smc_if_version) {
175 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
176 "smu fw version = 0x%08x (%d.%d.%d)\n",
177 smu->smc_if_version, if_version,
178 smu_version, smu_major, smu_minor, smu_debug);
179 pr_warn("SMU driver if version not matched\n");
/*
 * Power-gate (gate=true) or un-gate the SDMA engine via SMU messages.
 * Only meaningful on APUs; non-APU parts take the early-out path.
 * NOTE(review): the early-return value and the if/else around the two
 * message sends are not visible in this chunk — extraction artifact;
 * presumably selects PowerDownSdma when gating, PowerUpSdma otherwise.
 */
185 static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
187 if (!(smu->adev->flags & AMD_IS_APU))
191 return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
193 return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
/*
 * Power-gate (gate=true) or un-gate the VCN video block via SMU
 * messages; APU-only, mirroring smu_v12_0_powergate_sdma() above.
 * NOTE(review): the early-return value and the gate/ungate branch
 * structure are not visible in this chunk — extraction artifact.
 */
196 static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
198 if (!(smu->adev->flags & AMD_IS_APU))
202 return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
204 return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
/*
 * Enable or disable GFX clock-gating/power-gating (CGPG) through the
 * SetGfxCGPG message.  Skipped when the platform does not advertise
 * AMD_PG_SUPPORT_GFX_PG.
 * NOTE(review): the early-return value for the unsupported case is not
 * visible in this chunk — extraction artifact.
 */
207 static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
209 if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
212 return smu_v12_0_send_msg_with_param(smu,
213 SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
217 * smu_v12_0_get_gfxoff_status - get gfxoff status
219 * @smu: pointer to the SMU context (not an amdgpu_device)
221 * Reads the PWR_GFXOFF_STATUS field from the SMUIO_GFX_MISC_CNTL
 * register and returns it as a raw status code:
223 * Returns 0=GFXOFF(default).
224 * Returns 1=Transition out of GFX State.
225 * Returns 2=Not in GFXOFF.
226 * Returns 3=Transition into GFXOFF.
228 static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
231 uint32_t gfxOff_Status = 0;
232 struct amdgpu_device *adev = smu->adev;
/* NOTE(review): the declaration of 'reg' is not visible in this chunk
 * — extraction artifact; verify against the full file. */
234 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
235 gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
236 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
238 return gfxOff_Status;
/*
 * Allow or disallow the GFXOFF power feature.  On disallow, poll the
 * hardware status (via smu_v12_0_get_gfxoff_status()) until the GFX
 * core reports "not in GFXOFF" (status == 2), bounded by a 0.5 s
 * timeout (500 iterations).
 * NOTE(review): the enable/disable branch, the per-iteration delay and
 * timeout decrement inside the while loop are not visible in this
 * chunk — extraction artifact; verify against the full file.
 */
241 static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
243 int ret = 0, timeout = 500;
246 ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
249 ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
251 /* confirm gfx is back to "on" state, timeout is 0.5 second */
252 while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
256 DRM_ERROR("disable gfxoff timeout and failed!\n");
/*
 * Allocate the SMU table descriptor array (SMU_TABLE_COUNT entries,
 * zero-initialized by kcalloc) and hand it to smu_tables_init() for
 * ASIC-specific population.  Idempotent: bails out early if the table
 * array already exists.
 * NOTE(review): the early-return value and the kcalloc NULL check are
 * not visible in this chunk — extraction artifact; verify against the
 * full file.
 */
265 static int smu_v12_0_init_smc_tables(struct smu_context *smu)
267 struct smu_table_context *smu_table = &smu->smu_table;
268 struct smu_table *tables = NULL;
270 if (smu_table->tables)
273 tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
278 smu_table->tables = tables;
280 return smu_tables_init(smu, tables);
/*
 * Tear down what smu_v12_0_init_smc_tables()/populate set up: free the
 * clocks table and the table descriptor array, then NULL both pointers
 * so a repeated call is a no-op (guarded by the !tables early-out).
 * NOTE(review): the early-return value and final return are not
 * visible in this chunk — extraction artifact.
 */
283 static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
285 struct smu_table_context *smu_table = &smu->smu_table;
287 if (!smu_table->tables)
290 kfree(smu_table->clocks_table);
291 kfree(smu_table->tables);
293 smu_table->clocks_table = NULL;
294 smu_table->tables = NULL;
/*
 * Pull the DPM clocks table from the firmware: verify the DPMCLOCKS
 * table has a mapped CPU address, then issue a table transfer
 * (direction false = read from SMU) into smu_table->clocks_table.
 * NOTE(review): the allocation of clocks_table and the error-return
 * values between the visible lines are missing from this chunk —
 * extraction artifact; verify against the full file.
 */
299 static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
301 struct smu_table_context *smu_table = &smu->smu_table;
302 struct smu_table *table = NULL;
304 table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
308 if (!table->cpu_addr)
311 return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
/*
 * Report the ultimate (hardware min/max) frequency range for a given
 * clock domain.  For GFXCLK the limits are queried live from the SMC
 * (Get{Max,Min}GfxclkFrequency + argument readback); for the other
 * domains the limits come from the DPM clock table, indexed either by
 * the peak-profile masks (max) or level 0 (min).
 * NOTE(review): the switch/case labels selecting the clock domain, the
 * braces and most error returns are not visible in this chunk — the
 * original line numbers show large gaps (317-330, 337-342, 349-361,
 * 368-378) — extraction artifact; do not restructure without the full
 * file.
 */
314 static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
315 uint32_t *min, uint32_t *max)
318 uint32_t mclk_mask, soc_mask;
321 ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
331 ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
333 pr_err("Attempt to get max GX frequency from SMC Failed !\n");
336 ret = smu_read_smc_arg(smu, max);
343 ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
348 ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
362 ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
364 pr_err("Attempt to get min GX frequency from SMC Failed !\n");
367 ret = smu_read_smc_arg(smu, min);
374 ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
379 ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
/*
 * Trigger a mode-2 (soft) GPU reset by asking the SMU firmware to
 * perform a GfxDeviceDriverReset with the SMU_RESET_MODE_2 argument.
 * NOTE(review): the closing brace is not visible in this chunk —
 * extraction artifact.
 */
392 static int smu_v12_0_mode2_reset(struct smu_context *smu){
393 return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
/*
 * Constrain a clock domain to [min, max] by sending the per-domain
 * pair of SetHardMin*/SetSoftMax* messages (GFXCLK, FCLK, SOCCLK and
 * VCN each have their own message pair).
 * NOTE(review): the switch statement, its case labels and the
 * per-message error returns are not visible in this chunk — the
 * original line-number gaps (398-406, 408-410, ...) indicate dropped
 * lines — extraction artifact; do not restructure without the full
 * file.
 */
396 static int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
397 uint32_t min, uint32_t max)
407 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
411 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
417 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
421 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
426 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
430 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
435 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
439 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
/*
 * SMU v12 dispatch table: wires the generic smu_funcs interface to the
 * v12-specific implementations defined above.  Installed on the SMU
 * context by smu_v12_0_set_smu_funcs().
 * NOTE(review): the closing "};" is not visible in this chunk —
 * extraction artifact.
 */
450 static const struct smu_funcs smu_v12_0_funcs = {
451 .check_fw_status = smu_v12_0_check_fw_status,
452 .check_fw_version = smu_v12_0_check_fw_version,
453 .powergate_sdma = smu_v12_0_powergate_sdma,
454 .powergate_vcn = smu_v12_0_powergate_vcn,
455 .send_smc_msg = smu_v12_0_send_msg,
456 .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
457 .read_smc_arg = smu_v12_0_read_arg,
458 .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
459 .gfx_off_control = smu_v12_0_gfx_off_control,
460 .init_smc_tables = smu_v12_0_init_smc_tables,
461 .fini_smc_tables = smu_v12_0_fini_smc_tables,
462 .populate_smc_tables = smu_v12_0_populate_smc_tables,
463 .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
464 .mode2_reset = smu_v12_0_mode2_reset,
465 .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
/*
 * Public entry point: install the common v12 function table on the SMU
 * context, then hook the ASIC-specific powerplay-table callbacks —
 * Renoir is the only supported ASIC here; anything else logs a
 * warning.
 * NOTE(review): the case/default labels of the switch and the closing
 * braces are not visible in this chunk — extraction artifact; verify
 * against the full file.
 */
468 void smu_v12_0_set_smu_funcs(struct smu_context *smu)
470 struct amdgpu_device *adev = smu->adev;
472 smu->funcs = &smu_v12_0_funcs;
474 switch (adev->asic_type) {
476 renoir_set_ppt_funcs(smu);
479 pr_warn("Unknown asic for smu12\n");