/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include "smu7_hwmgr.h"
25 #include "smu7_clockpowergating.h"
26 #include "smu7_common.h"
28 static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
30 return smum_send_msg_to_smc(hwmgr, enable ?
31 PPSMC_MSG_UVDDPM_Enable :
32 PPSMC_MSG_UVDDPM_Disable);
35 static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
37 return smum_send_msg_to_smc(hwmgr, enable ?
38 PPSMC_MSG_VCEDPM_Enable :
39 PPSMC_MSG_VCEDPM_Disable);
42 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
45 smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
46 return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
49 static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
52 smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
53 return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
56 int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
58 if (phm_cf_want_uvd_power_gating(hwmgr))
59 return smum_send_msg_to_smc(hwmgr,
60 PPSMC_MSG_UVDPowerOFF);
64 static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
66 if (phm_cf_want_uvd_power_gating(hwmgr)) {
67 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
68 PHM_PlatformCaps_UVDDynamicPowerGating)) {
69 return smum_send_msg_to_smc_with_parameter(hwmgr,
70 PPSMC_MSG_UVDPowerON, 1);
72 return smum_send_msg_to_smc_with_parameter(hwmgr,
73 PPSMC_MSG_UVDPowerON, 0);
80 static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
82 if (phm_cf_want_vce_power_gating(hwmgr))
83 return smum_send_msg_to_smc(hwmgr,
84 PPSMC_MSG_VCEPowerOFF);
88 static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
90 if (phm_cf_want_vce_power_gating(hwmgr))
91 return smum_send_msg_to_smc(hwmgr,
92 PPSMC_MSG_VCEPowerON);
96 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
98 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
100 data->uvd_power_gated = false;
101 data->vce_power_gated = false;
103 smu7_powerup_uvd(hwmgr);
104 smu7_powerup_vce(hwmgr);
109 void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
111 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
113 data->uvd_power_gated = bgate;
116 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
117 AMD_IP_BLOCK_TYPE_UVD,
119 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
120 AMD_IP_BLOCK_TYPE_UVD,
122 smu7_update_uvd_dpm(hwmgr, true);
123 smu7_powerdown_uvd(hwmgr);
125 smu7_powerup_uvd(hwmgr);
126 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
127 AMD_IP_BLOCK_TYPE_UVD,
128 AMD_CG_STATE_UNGATE);
129 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
130 AMD_IP_BLOCK_TYPE_UVD,
131 AMD_PG_STATE_UNGATE);
132 smu7_update_uvd_dpm(hwmgr, false);
137 void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
139 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
141 data->vce_power_gated = bgate;
144 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
145 AMD_IP_BLOCK_TYPE_VCE,
147 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
148 AMD_IP_BLOCK_TYPE_VCE,
150 smu7_update_vce_dpm(hwmgr, true);
151 smu7_powerdown_vce(hwmgr);
153 smu7_powerup_vce(hwmgr);
154 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
155 AMD_IP_BLOCK_TYPE_VCE,
156 AMD_CG_STATE_UNGATE);
157 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
158 AMD_IP_BLOCK_TYPE_VCE,
159 AMD_PG_STATE_UNGATE);
160 smu7_update_vce_dpm(hwmgr, false);
164 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
165 const uint32_t *msg_id)
170 if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
173 switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
175 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
176 case PP_BLOCK_GFX_CG:
177 if (PP_STATE_SUPPORT_CG & *msg_id) {
178 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
179 PPSMC_MSG_EnableClockGatingFeature :
180 PPSMC_MSG_DisableClockGatingFeature;
181 value = CG_GFX_CGCG_MASK;
183 if (smum_send_msg_to_smc_with_parameter(
187 if (PP_STATE_SUPPORT_LS & *msg_id) {
188 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
189 ? PPSMC_MSG_EnableClockGatingFeature
190 : PPSMC_MSG_DisableClockGatingFeature;
191 value = CG_GFX_CGLS_MASK;
193 if (smum_send_msg_to_smc_with_parameter(
199 case PP_BLOCK_GFX_3D:
200 if (PP_STATE_SUPPORT_CG & *msg_id) {
201 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
202 PPSMC_MSG_EnableClockGatingFeature :
203 PPSMC_MSG_DisableClockGatingFeature;
204 value = CG_GFX_3DCG_MASK;
206 if (smum_send_msg_to_smc_with_parameter(
211 if (PP_STATE_SUPPORT_LS & *msg_id) {
212 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
213 PPSMC_MSG_EnableClockGatingFeature :
214 PPSMC_MSG_DisableClockGatingFeature;
215 value = CG_GFX_3DLS_MASK;
217 if (smum_send_msg_to_smc_with_parameter(
223 case PP_BLOCK_GFX_RLC:
224 if (PP_STATE_SUPPORT_LS & *msg_id) {
225 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
226 PPSMC_MSG_EnableClockGatingFeature :
227 PPSMC_MSG_DisableClockGatingFeature;
228 value = CG_GFX_RLC_LS_MASK;
230 if (smum_send_msg_to_smc_with_parameter(
236 case PP_BLOCK_GFX_CP:
237 if (PP_STATE_SUPPORT_LS & *msg_id) {
238 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
239 PPSMC_MSG_EnableClockGatingFeature :
240 PPSMC_MSG_DisableClockGatingFeature;
241 value = CG_GFX_CP_LS_MASK;
243 if (smum_send_msg_to_smc_with_parameter(
249 case PP_BLOCK_GFX_MG:
250 if (PP_STATE_SUPPORT_CG & *msg_id) {
251 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
252 PPSMC_MSG_EnableClockGatingFeature :
253 PPSMC_MSG_DisableClockGatingFeature;
254 value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
255 CG_GFX_OTHERS_MGCG_MASK);
257 if (smum_send_msg_to_smc_with_parameter(
269 switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
270 case PP_BLOCK_SYS_BIF:
271 if (PP_STATE_SUPPORT_CG & *msg_id) {
272 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
273 PPSMC_MSG_EnableClockGatingFeature :
274 PPSMC_MSG_DisableClockGatingFeature;
275 value = CG_SYS_BIF_MGCG_MASK;
277 if (smum_send_msg_to_smc_with_parameter(
281 if (PP_STATE_SUPPORT_LS & *msg_id) {
282 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
283 PPSMC_MSG_EnableClockGatingFeature :
284 PPSMC_MSG_DisableClockGatingFeature;
285 value = CG_SYS_BIF_MGLS_MASK;
287 if (smum_send_msg_to_smc_with_parameter(
293 case PP_BLOCK_SYS_MC:
294 if (PP_STATE_SUPPORT_CG & *msg_id) {
295 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
296 PPSMC_MSG_EnableClockGatingFeature :
297 PPSMC_MSG_DisableClockGatingFeature;
298 value = CG_SYS_MC_MGCG_MASK;
300 if (smum_send_msg_to_smc_with_parameter(
305 if (PP_STATE_SUPPORT_LS & *msg_id) {
306 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
307 PPSMC_MSG_EnableClockGatingFeature :
308 PPSMC_MSG_DisableClockGatingFeature;
309 value = CG_SYS_MC_MGLS_MASK;
311 if (smum_send_msg_to_smc_with_parameter(
317 case PP_BLOCK_SYS_DRM:
318 if (PP_STATE_SUPPORT_CG & *msg_id) {
319 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
320 PPSMC_MSG_EnableClockGatingFeature :
321 PPSMC_MSG_DisableClockGatingFeature;
322 value = CG_SYS_DRM_MGCG_MASK;
324 if (smum_send_msg_to_smc_with_parameter(
328 if (PP_STATE_SUPPORT_LS & *msg_id) {
329 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
330 PPSMC_MSG_EnableClockGatingFeature :
331 PPSMC_MSG_DisableClockGatingFeature;
332 value = CG_SYS_DRM_MGLS_MASK;
334 if (smum_send_msg_to_smc_with_parameter(
340 case PP_BLOCK_SYS_HDP:
341 if (PP_STATE_SUPPORT_CG & *msg_id) {
342 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
343 PPSMC_MSG_EnableClockGatingFeature :
344 PPSMC_MSG_DisableClockGatingFeature;
345 value = CG_SYS_HDP_MGCG_MASK;
347 if (smum_send_msg_to_smc_with_parameter(
352 if (PP_STATE_SUPPORT_LS & *msg_id) {
353 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
354 PPSMC_MSG_EnableClockGatingFeature :
355 PPSMC_MSG_DisableClockGatingFeature;
356 value = CG_SYS_HDP_MGLS_MASK;
358 if (smum_send_msg_to_smc_with_parameter(
364 case PP_BLOCK_SYS_SDMA:
365 if (PP_STATE_SUPPORT_CG & *msg_id) {
366 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
367 PPSMC_MSG_EnableClockGatingFeature :
368 PPSMC_MSG_DisableClockGatingFeature;
369 value = CG_SYS_SDMA_MGCG_MASK;
371 if (smum_send_msg_to_smc_with_parameter(
376 if (PP_STATE_SUPPORT_LS & *msg_id) {
377 msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
378 PPSMC_MSG_EnableClockGatingFeature :
379 PPSMC_MSG_DisableClockGatingFeature;
380 value = CG_SYS_SDMA_MGLS_MASK;
382 if (smum_send_msg_to_smc_with_parameter(
388 case PP_BLOCK_SYS_ROM:
389 if (PP_STATE_SUPPORT_CG & *msg_id) {
390 msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
391 PPSMC_MSG_EnableClockGatingFeature :
392 PPSMC_MSG_DisableClockGatingFeature;
393 value = CG_SYS_ROM_MASK;
395 if (smum_send_msg_to_smc_with_parameter(
415 /* This function is for Polaris11 only for now,
416 * Powerplay will only control the static per CU Power Gating.
417 * Dynamic per CU Power Gating will be done in gfx.
419 int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
421 struct amdgpu_device *adev = hwmgr->adev;
424 return smum_send_msg_to_smc_with_parameter(hwmgr,
425 PPSMC_MSG_GFX_CU_PG_ENABLE,
426 adev->gfx.cu_info.number);
428 return smum_send_msg_to_smc(hwmgr,
429 PPSMC_MSG_GFX_CU_PG_DISABLE);