2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
33 #include "hardwaremanager.h"
35 #include "smu10_hwmgr.h"
36 #include "power_state.h"
37 #include "soc15_common.h"
39 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
40 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
41 #define SCLK_MIN_DIV_INTV_SHIFT 12
42 #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
43 #define SMC_RAM_END 0x40000
45 #define mmPWR_MISC_CNTL_STATUS 0x0183
46 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
47 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
48 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
49 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
50 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
52 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
55 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
56 struct pp_display_clock_request *clock_req);
59 static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
61 if (SMU10_Magic != hw_ps->magic)
64 return (struct smu10_power_state *)hw_ps;
67 static const struct smu10_power_state *cast_const_smu10_ps(
68 const struct pp_hw_power_state *hw_ps)
70 if (SMU10_Magic != hw_ps->magic)
73 return (struct smu10_power_state *)hw_ps;
76 static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
78 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
80 smu10_data->dce_slow_sclk_threshold = 30000;
81 smu10_data->thermal_auto_throttling_treshold = 0;
82 smu10_data->is_nb_dpm_enabled = 1;
83 smu10_data->dpm_flags = 1;
84 smu10_data->need_min_deep_sleep_dcefclk = true;
85 smu10_data->num_active_display = 0;
86 smu10_data->deep_sleep_dcefclk = 0;
88 if (hwmgr->feature_mask & PP_GFXOFF_MASK)
89 smu10_data->gfx_off_controled_by_driver = true;
91 smu10_data->gfx_off_controled_by_driver = false;
93 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
94 PHM_PlatformCaps_SclkDeepSleep);
96 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
97 PHM_PlatformCaps_SclkThrottleLowNotification);
99 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
100 PHM_PlatformCaps_PowerPlaySupport);
/* Stub: SMU10 does not populate a max clock/voltage limits table. */
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}
110 static int smu10_init_dynamic_state_adjustment_rule_settings(
111 struct pp_hwmgr *hwmgr)
113 uint32_t table_size =
114 sizeof(struct phm_clock_voltage_dependency_table) +
115 (7 * sizeof(struct phm_clock_voltage_dependency_record));
117 struct phm_clock_voltage_dependency_table *table_clk_vlt =
118 kzalloc(table_size, GFP_KERNEL);
120 if (NULL == table_clk_vlt) {
121 pr_err("Can not allocate memory!\n");
125 table_clk_vlt->count = 8;
126 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
127 table_clk_vlt->entries[0].v = 0;
128 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
129 table_clk_vlt->entries[1].v = 1;
130 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
131 table_clk_vlt->entries[2].v = 2;
132 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
133 table_clk_vlt->entries[3].v = 3;
134 table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
135 table_clk_vlt->entries[4].v = 4;
136 table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
137 table_clk_vlt->entries[5].v = 5;
138 table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
139 table_clk_vlt->entries[6].v = 6;
140 table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
141 table_clk_vlt->entries[7].v = 7;
142 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
147 static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
149 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
151 smu10_data->sys_info.htc_hyst_lmt = 5;
152 smu10_data->sys_info.htc_tmp_lmt = 203;
154 if (smu10_data->thermal_auto_throttling_treshold == 0)
155 smu10_data->thermal_auto_throttling_treshold = 203;
157 smu10_construct_max_power_limits_table (hwmgr,
158 &hwmgr->dyn_state.max_clock_voltage_on_ac);
160 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
165 static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
170 static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
172 struct PP_Clocks clocks = {0};
173 struct pp_display_clock_request clock_req;
175 clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
176 clock_req.clock_type = amd_pp_dcf_clock;
177 clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
179 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
180 "Attempt to set DCF Clock Failed!", return -EINVAL);
185 static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
187 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
189 if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
190 smu10_data->deep_sleep_dcefclk = clock/100;
191 smum_send_msg_to_smc_with_parameter(hwmgr,
192 PPSMC_MSG_SetMinDeepSleepDcefclk,
193 smu10_data->deep_sleep_dcefclk);
198 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
200 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
202 if (smu10_data->num_active_display != count) {
203 smu10_data->num_active_display = count;
204 smum_send_msg_to_smc_with_parameter(hwmgr,
205 PPSMC_MSG_SetDisplayCount,
206 smu10_data->num_active_display);
/* Power-state-set hook: only the clock limit needs applying on SMU10. */
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}
217 static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
219 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
220 struct amdgpu_device *adev = hwmgr->adev;
222 smu10_data->vcn_power_gated = true;
223 smu10_data->isp_tileA_power_gated = true;
224 smu10_data->isp_tileB_power_gated = true;
226 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
227 return smum_send_msg_to_smc_with_parameter(hwmgr,
228 PPSMC_MSG_SetGfxCGPG,
/* ASIC setup hook: only power-gate state initialization is required. */
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}
240 static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
242 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
244 smu10_data->separation_time = 0;
245 smu10_data->cc6_disable = false;
246 smu10_data->pstate_disable = false;
247 smu10_data->cc6_setting_changed = false;
/* Power-off hook: resetting cached CC6 data is all SMU10 needs. */
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}
257 static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
260 struct amdgpu_device *adev = hwmgr->adev;
262 reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
263 if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
264 (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
270 static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
272 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
274 if (smu10_data->gfx_off_controled_by_driver) {
275 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
277 /* confirm gfx is back to "on" state */
278 while (!smu10_is_gfx_on(hwmgr))
/* DPM disable hook: turning gfxoff off is the only required step. */
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return smu10_disable_gfx_off(hwmgr);
}
290 static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
292 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
294 if (smu10_data->gfx_off_controled_by_driver)
295 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
/* DPM enable hook: turning gfxoff on is the only required step. */
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return smu10_enable_gfx_off(hwmgr);
}
305 static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
308 return smu10_enable_gfx_off(hwmgr);
310 return smu10_disable_gfx_off(hwmgr);
/* Stub: SMU10 applies no adjustment rules between requested and current state. */
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state  *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}
320 /* temporary hardcoded clock voltage breakdown tables */
321 static const DpmClock_t VddDcfClk[]= {
327 static const DpmClock_t VddSocClk[]= {
333 static const DpmClock_t VddFClk[]= {
339 static const DpmClock_t VddDispClk[]= {
345 static const DpmClock_t VddDppClk[]= {
351 static const DpmClock_t VddPhyClk[]= {
357 static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
358 struct smu10_voltage_dependency_table **pptable,
359 uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
361 uint32_t table_size, i;
362 struct smu10_voltage_dependency_table *ptable;
364 table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
365 ptable = kzalloc(table_size, GFP_KERNEL);
370 ptable->count = num_entry;
372 for (i = 0; i < ptable->count; i++) {
373 ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
374 ptable->entries[i].vol = pclk_dependency_table->Vol;
375 pclk_dependency_table++;
384 static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
388 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
389 DpmClocks_t *table = &(smu10_data->clock_table);
390 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
392 result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
394 PP_ASSERT_WITH_CODE((0 == result),
395 "Attempt to copy clock table from smc failed",
398 if (0 == result && table->DcefClocks[0].Freq != 0) {
399 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
400 NUM_DCEFCLK_DPM_LEVELS,
401 &smu10_data->clock_table.DcefClocks[0]);
402 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
403 NUM_SOCCLK_DPM_LEVELS,
404 &smu10_data->clock_table.SocClocks[0]);
405 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
407 &smu10_data->clock_table.FClocks[0]);
408 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
409 NUM_MEMCLK_DPM_LEVELS,
410 &smu10_data->clock_table.MemClocks[0]);
412 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
413 ARRAY_SIZE(VddDcfClk),
415 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
416 ARRAY_SIZE(VddSocClk),
418 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
422 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
423 ARRAY_SIZE(VddDispClk),
425 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
426 ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
427 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
428 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
430 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
431 result = smum_get_argument(hwmgr);
432 smu10_data->gfx_min_freq_limit = result * 100;
434 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
435 result = smum_get_argument(hwmgr);
436 smu10_data->gfx_max_freq_limit = result * 100;
441 static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
444 struct smu10_hwmgr *data;
446 data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
450 hwmgr->backend = data;
452 result = smu10_initialize_dpm_defaults(hwmgr);
454 pr_err("smu10_initialize_dpm_defaults failed\n");
458 smu10_populate_clock_table(hwmgr);
460 result = smu10_get_system_info_data(hwmgr);
462 pr_err("smu10_get_system_info_data failed\n");
466 smu10_construct_boot_state(hwmgr);
468 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
469 SMU10_MAX_HARDWARE_POWERLEVELS;
471 hwmgr->platform_descriptor.hardwarePerformanceLevels =
472 SMU10_MAX_HARDWARE_POWERLEVELS;
474 hwmgr->platform_descriptor.vbiosInterruptId = 0;
476 hwmgr->platform_descriptor.clockStep.engineClock = 500;
478 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
480 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
482 hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
483 hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
488 static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
490 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
491 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
493 kfree(pinfo->vdd_dep_on_dcefclk);
494 pinfo->vdd_dep_on_dcefclk = NULL;
495 kfree(pinfo->vdd_dep_on_socclk);
496 pinfo->vdd_dep_on_socclk = NULL;
497 kfree(pinfo->vdd_dep_on_fclk);
498 pinfo->vdd_dep_on_fclk = NULL;
499 kfree(pinfo->vdd_dep_on_dispclk);
500 pinfo->vdd_dep_on_dispclk = NULL;
501 kfree(pinfo->vdd_dep_on_dppclk);
502 pinfo->vdd_dep_on_dppclk = NULL;
503 kfree(pinfo->vdd_dep_on_phyclk);
504 pinfo->vdd_dep_on_phyclk = NULL;
506 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
507 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
509 kfree(hwmgr->backend);
510 hwmgr->backend = NULL;
515 static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
516 enum amd_dpm_forced_level level)
518 if (hwmgr->smu_version < 0x1E3700) {
519 pr_info("smu firmware version too old, can not set dpm level\n");
524 case AMD_DPM_FORCED_LEVEL_HIGH:
525 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
526 smum_send_msg_to_smc_with_parameter(hwmgr,
527 PPSMC_MSG_SetHardMinGfxClk,
528 SMU10_UMD_PSTATE_PEAK_GFXCLK);
529 smum_send_msg_to_smc_with_parameter(hwmgr,
530 PPSMC_MSG_SetHardMinFclkByFreq,
531 SMU10_UMD_PSTATE_PEAK_FCLK);
532 smum_send_msg_to_smc_with_parameter(hwmgr,
533 PPSMC_MSG_SetHardMinSocclkByFreq,
534 SMU10_UMD_PSTATE_PEAK_SOCCLK);
535 smum_send_msg_to_smc_with_parameter(hwmgr,
536 PPSMC_MSG_SetHardMinVcn,
537 SMU10_UMD_PSTATE_VCE);
539 smum_send_msg_to_smc_with_parameter(hwmgr,
540 PPSMC_MSG_SetSoftMaxGfxClk,
541 SMU10_UMD_PSTATE_PEAK_GFXCLK);
542 smum_send_msg_to_smc_with_parameter(hwmgr,
543 PPSMC_MSG_SetSoftMaxFclkByFreq,
544 SMU10_UMD_PSTATE_PEAK_FCLK);
545 smum_send_msg_to_smc_with_parameter(hwmgr,
546 PPSMC_MSG_SetSoftMaxSocclkByFreq,
547 SMU10_UMD_PSTATE_PEAK_SOCCLK);
548 smum_send_msg_to_smc_with_parameter(hwmgr,
549 PPSMC_MSG_SetSoftMaxVcn,
550 SMU10_UMD_PSTATE_VCE);
552 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
553 smum_send_msg_to_smc_with_parameter(hwmgr,
554 PPSMC_MSG_SetHardMinGfxClk,
555 SMU10_UMD_PSTATE_MIN_GFXCLK);
556 smum_send_msg_to_smc_with_parameter(hwmgr,
557 PPSMC_MSG_SetSoftMaxGfxClk,
558 SMU10_UMD_PSTATE_MIN_GFXCLK);
560 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
561 smum_send_msg_to_smc_with_parameter(hwmgr,
562 PPSMC_MSG_SetHardMinFclkByFreq,
563 SMU10_UMD_PSTATE_MIN_FCLK);
564 smum_send_msg_to_smc_with_parameter(hwmgr,
565 PPSMC_MSG_SetSoftMaxFclkByFreq,
566 SMU10_UMD_PSTATE_MIN_FCLK);
568 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
569 smum_send_msg_to_smc_with_parameter(hwmgr,
570 PPSMC_MSG_SetHardMinGfxClk,
571 SMU10_UMD_PSTATE_GFXCLK);
572 smum_send_msg_to_smc_with_parameter(hwmgr,
573 PPSMC_MSG_SetHardMinFclkByFreq,
574 SMU10_UMD_PSTATE_FCLK);
575 smum_send_msg_to_smc_with_parameter(hwmgr,
576 PPSMC_MSG_SetHardMinSocclkByFreq,
577 SMU10_UMD_PSTATE_SOCCLK);
578 smum_send_msg_to_smc_with_parameter(hwmgr,
579 PPSMC_MSG_SetHardMinVcn,
580 SMU10_UMD_PSTATE_VCE);
582 smum_send_msg_to_smc_with_parameter(hwmgr,
583 PPSMC_MSG_SetSoftMaxGfxClk,
584 SMU10_UMD_PSTATE_GFXCLK);
585 smum_send_msg_to_smc_with_parameter(hwmgr,
586 PPSMC_MSG_SetSoftMaxFclkByFreq,
587 SMU10_UMD_PSTATE_FCLK);
588 smum_send_msg_to_smc_with_parameter(hwmgr,
589 PPSMC_MSG_SetSoftMaxSocclkByFreq,
590 SMU10_UMD_PSTATE_SOCCLK);
591 smum_send_msg_to_smc_with_parameter(hwmgr,
592 PPSMC_MSG_SetSoftMaxVcn,
593 SMU10_UMD_PSTATE_VCE);
595 case AMD_DPM_FORCED_LEVEL_AUTO:
596 smum_send_msg_to_smc_with_parameter(hwmgr,
597 PPSMC_MSG_SetHardMinGfxClk,
598 SMU10_UMD_PSTATE_MIN_GFXCLK);
599 smum_send_msg_to_smc_with_parameter(hwmgr,
600 PPSMC_MSG_SetHardMinFclkByFreq,
601 SMU10_UMD_PSTATE_MIN_FCLK);
602 smum_send_msg_to_smc_with_parameter(hwmgr,
603 PPSMC_MSG_SetHardMinSocclkByFreq,
604 SMU10_UMD_PSTATE_MIN_SOCCLK);
605 smum_send_msg_to_smc_with_parameter(hwmgr,
606 PPSMC_MSG_SetHardMinVcn,
607 SMU10_UMD_PSTATE_MIN_VCE);
609 smum_send_msg_to_smc_with_parameter(hwmgr,
610 PPSMC_MSG_SetSoftMaxGfxClk,
611 SMU10_UMD_PSTATE_PEAK_GFXCLK);
612 smum_send_msg_to_smc_with_parameter(hwmgr,
613 PPSMC_MSG_SetSoftMaxFclkByFreq,
614 SMU10_UMD_PSTATE_PEAK_FCLK);
615 smum_send_msg_to_smc_with_parameter(hwmgr,
616 PPSMC_MSG_SetSoftMaxSocclkByFreq,
617 SMU10_UMD_PSTATE_PEAK_SOCCLK);
618 smum_send_msg_to_smc_with_parameter(hwmgr,
619 PPSMC_MSG_SetSoftMaxVcn,
620 SMU10_UMD_PSTATE_VCE);
622 case AMD_DPM_FORCED_LEVEL_LOW:
623 smum_send_msg_to_smc_with_parameter(hwmgr,
624 PPSMC_MSG_SetHardMinGfxClk,
625 SMU10_UMD_PSTATE_MIN_GFXCLK);
626 smum_send_msg_to_smc_with_parameter(hwmgr,
627 PPSMC_MSG_SetSoftMaxGfxClk,
628 SMU10_UMD_PSTATE_MIN_GFXCLK);
629 smum_send_msg_to_smc_with_parameter(hwmgr,
630 PPSMC_MSG_SetHardMinFclkByFreq,
631 SMU10_UMD_PSTATE_MIN_FCLK);
632 smum_send_msg_to_smc_with_parameter(hwmgr,
633 PPSMC_MSG_SetSoftMaxFclkByFreq,
634 SMU10_UMD_PSTATE_MIN_FCLK);
636 case AMD_DPM_FORCED_LEVEL_MANUAL:
637 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
644 static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
646 struct smu10_hwmgr *data;
651 data = (struct smu10_hwmgr *)(hwmgr->backend);
654 return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
656 return data->clock_vol_info.vdd_dep_on_fclk->entries[
657 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
660 static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
662 struct smu10_hwmgr *data;
667 data = (struct smu10_hwmgr *)(hwmgr->backend);
670 return data->gfx_min_freq_limit;
672 return data->gfx_max_freq_limit;
/* Stub: the boot state needs no patching on SMU10. */
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}
681 static int smu10_dpm_get_pp_table_entry_callback(
682 struct pp_hwmgr *hwmgr,
683 struct pp_hw_power_state *hw_ps,
685 const void *clock_info)
687 struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
689 smu10_ps->levels[index].engine_clock = 0;
691 smu10_ps->levels[index].vddc_index = 0;
692 smu10_ps->level = index + 1;
694 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
695 smu10_ps->levels[index].ds_divider_index = 5;
696 smu10_ps->levels[index].ss_divider_index = 5;
/* Query the powerplay tables for the entry count; 0 on query failure. */
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}
712 static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
713 unsigned long entry, struct pp_power_state *ps)
716 struct smu10_power_state *smu10_ps;
718 ps->hardware.magic = SMU10_Magic;
720 smu10_ps = cast_smu10_ps(&(ps->hardware));
722 result = pp_tables_get_entry(hwmgr, entry, ps,
723 smu10_dpm_get_pp_table_entry_callback);
725 smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
726 smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
731 static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
733 return sizeof(struct smu10_power_state);
736 static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
742 static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
743 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
745 struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
747 if (separation_time != data->separation_time ||
748 cc6_disable != data->cc6_disable ||
749 pstate_disable != data->pstate_disable) {
750 data->separation_time = separation_time;
751 data->cc6_disable = cc6_disable;
752 data->pstate_disable = pstate_disable;
753 data->cc6_setting_changed = true;
758 static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
759 struct amd_pp_simple_clock_info *info)
764 static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
765 enum pp_clock_type type, uint32_t mask)
770 static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
771 enum pp_clock_type type, char *buf)
773 struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
774 struct smu10_voltage_dependency_table *mclk_table =
775 data->clock_vol_info.vdd_dep_on_fclk;
776 int i, now, size = 0;
780 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
781 now = smum_get_argument(hwmgr);
783 size += sprintf(buf + size, "0: %uMhz %s\n",
784 data->gfx_min_freq_limit / 100,
785 ((data->gfx_min_freq_limit / 100)
787 size += sprintf(buf + size, "1: %uMhz %s\n",
788 data->gfx_max_freq_limit / 100,
789 ((data->gfx_max_freq_limit / 100)
793 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
794 now = smum_get_argument(hwmgr);
796 for (i = 0; i < mclk_table->count; i++)
797 size += sprintf(buf + size, "%d: %uMhz %s\n",
799 mclk_table->entries[i].clk / 100,
800 ((mclk_table->entries[i].clk / 100)
810 static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
811 PHM_PerformanceLevelDesignation designation, uint32_t index,
812 PHM_PerformanceLevel *level)
814 struct smu10_hwmgr *data;
816 if (level == NULL || hwmgr == NULL || state == NULL)
819 data = (struct smu10_hwmgr *)(hwmgr->backend);
822 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
823 level->coreClock = data->gfx_min_freq_limit;
825 level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
826 data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
827 level->coreClock = data->gfx_max_freq_limit;
830 level->nonLocalMemoryFreq = 0;
831 level->nonLocalMemoryWidth = 0;
836 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
837 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
839 const struct smu10_power_state *ps = cast_const_smu10_ps(state);
841 clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
842 clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
847 #define MEM_FREQ_LOW_LATENCY 25000
848 #define MEM_FREQ_HIGH_LATENCY 80000
849 #define MEM_LATENCY_HIGH 245
850 #define MEM_LATENCY_LOW 35
851 #define MEM_LATENCY_ERR 0xFFFF
854 static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
857 if (clock >= MEM_FREQ_LOW_LATENCY &&
858 clock < MEM_FREQ_HIGH_LATENCY)
859 return MEM_LATENCY_HIGH;
860 else if (clock >= MEM_FREQ_HIGH_LATENCY)
861 return MEM_LATENCY_LOW;
863 return MEM_LATENCY_ERR;
866 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
867 enum amd_pp_clock_type type,
868 struct pp_clock_levels_with_latency *clocks)
871 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
872 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
873 struct smu10_voltage_dependency_table *pclk_vol_table;
874 bool latency_required = false;
880 case amd_pp_mem_clock:
881 pclk_vol_table = pinfo->vdd_dep_on_mclk;
882 latency_required = true;
885 pclk_vol_table = pinfo->vdd_dep_on_fclk;
886 latency_required = true;
888 case amd_pp_dcf_clock:
889 pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
891 case amd_pp_disp_clock:
892 pclk_vol_table = pinfo->vdd_dep_on_dispclk;
894 case amd_pp_phy_clock:
895 pclk_vol_table = pinfo->vdd_dep_on_phyclk;
897 case amd_pp_dpp_clock:
898 pclk_vol_table = pinfo->vdd_dep_on_dppclk;
903 if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
906 clocks->num_levels = 0;
907 for (i = 0; i < pclk_vol_table->count; i++) {
908 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
909 clocks->data[i].latency_in_us = latency_required ?
910 smu10_get_mem_latency(hwmgr,
911 pclk_vol_table->entries[i].clk) :
913 clocks->num_levels++;
919 static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
920 enum amd_pp_clock_type type,
921 struct pp_clock_levels_with_voltage *clocks)
924 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
925 struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
926 struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
932 case amd_pp_mem_clock:
933 pclk_vol_table = pinfo->vdd_dep_on_mclk;
936 pclk_vol_table = pinfo->vdd_dep_on_fclk;
938 case amd_pp_dcf_clock:
939 pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
941 case amd_pp_soc_clock:
942 pclk_vol_table = pinfo->vdd_dep_on_socclk;
948 if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
951 clocks->num_levels = 0;
952 for (i = 0; i < pclk_vol_table->count; i++) {
953 clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
954 clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
955 clocks->num_levels++;
961 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
962 struct pp_display_clock_request *clock_req)
964 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
965 enum amd_pp_clock_type clk_type = clock_req->clock_type;
966 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
970 case amd_pp_dcf_clock:
971 if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
973 msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
974 smu10_data->dcf_actual_hard_min_freq = clk_freq;
976 case amd_pp_soc_clock:
977 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
980 if (clk_freq == smu10_data->f_actual_hard_min_freq)
982 smu10_data->f_actual_hard_min_freq = clk_freq;
983 msg = PPSMC_MSG_SetHardMinFclkByFreq;
986 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
990 smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
995 static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
997 clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
1001 static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1003 struct amdgpu_device *adev = hwmgr->adev;
1004 uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
1006 (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
1008 if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
1009 cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1011 cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1016 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1017 void *value, int *size)
1019 uint32_t sclk, mclk;
1023 case AMDGPU_PP_SENSOR_GFX_SCLK:
1024 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
1025 sclk = smum_get_argument(hwmgr);
1026 /* in units of 10KHZ */
1027 *((uint32_t *)value) = sclk * 100;
1030 case AMDGPU_PP_SENSOR_GFX_MCLK:
1031 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
1032 mclk = smum_get_argument(hwmgr);
1033 /* in units of 10KHZ */
1034 *((uint32_t *)value) = mclk * 100;
1037 case AMDGPU_PP_SENSOR_GPU_TEMP:
1038 *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1048 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1049 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1051 struct smu10_hwmgr *data = hwmgr->backend;
1052 Watermarks_t *table = &(data->water_marks_table);
1055 smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
1056 smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
1057 data->water_marks_exist = true;
1060 static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
1062 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
1065 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1066 .backend_init = smu10_hwmgr_backend_init,
1067 .backend_fini = smu10_hwmgr_backend_fini,
1069 .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1070 .force_dpm_level = smu10_dpm_force_dpm_level,
1071 .get_power_state_size = smu10_get_power_state_size,
1072 .powerdown_uvd = NULL,
1073 .powergate_uvd = NULL,
1074 .powergate_vce = NULL,
1075 .get_mclk = smu10_dpm_get_mclk,
1076 .get_sclk = smu10_dpm_get_sclk,
1077 .patch_boot_state = smu10_dpm_patch_boot_state,
1078 .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1079 .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1080 .set_cpu_power_state = smu10_set_cpu_power_state,
1081 .store_cc6_data = smu10_store_cc6_data,
1082 .force_clock_level = smu10_force_clock_level,
1083 .print_clock_levels = smu10_print_clock_levels,
1084 .get_dal_power_level = smu10_get_dal_power_level,
1085 .get_performance_level = smu10_get_performance_level,
1086 .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1087 .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1088 .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1089 .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
1090 .get_max_high_clocks = smu10_get_max_high_clocks,
1091 .read_sensor = smu10_read_sensor,
1092 .set_active_display_count = smu10_set_active_display_count,
1093 .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1094 .dynamic_state_management_enable = smu10_enable_dpm_tasks,
1095 .power_off_asic = smu10_power_off_asic,
1096 .asic_setup = smu10_setup_asic_task,
1097 .power_state_set = smu10_set_power_state_tasks,
1098 .dynamic_state_management_disable = smu10_disable_dpm_tasks,
1099 .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
1100 .gfx_off_control = smu10_gfx_off_control,
1103 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1105 hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1106 hwmgr->pptable_func = &pptable_funcs;