[linux.git] drivers/gpu/drm/amd/powerplay/amd_powerplay.c
commit: drm/amd/powerplay: dynamically disable ds and ulv for compute
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34
35
36 static const struct amd_pm_funcs pp_dpm_funcs;
37
38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40         struct pp_hwmgr *hwmgr;
41
42         if (adev == NULL)
43                 return -EINVAL;
44
45         hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46         if (hwmgr == NULL)
47                 return -ENOMEM;
48
49         hwmgr->adev = adev;
50         hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51         hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
52         hwmgr->device = amdgpu_cgs_create_device(adev);
53         mutex_init(&hwmgr->smu_lock);
54         hwmgr->chip_family = adev->family;
55         hwmgr->chip_id = adev->asic_type;
56         hwmgr->feature_mask = adev->pm.pp_feature;
57         hwmgr->display_config = &adev->pm.pm_display_cfg;
58         adev->powerplay.pp_handle = hwmgr;
59         adev->powerplay.pp_funcs = &pp_dpm_funcs;
60         return 0;
61 }
62
63
64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67
68         kfree(hwmgr->hardcode_pp_table);
69         hwmgr->hardcode_pp_table = NULL;
70
71         kfree(hwmgr);
72         hwmgr = NULL;
73 }
74
75 static int pp_early_init(void *handle)
76 {
77         int ret;
78         struct amdgpu_device *adev = handle;
79
80         ret = amd_powerplay_create(adev);
81
82         if (ret != 0)
83                 return ret;
84
85         ret = hwmgr_early_init(adev->powerplay.pp_handle);
86         if (ret)
87                 return -EINVAL;
88
89         return 0;
90 }
91
92 static int pp_sw_init(void *handle)
93 {
94         struct amdgpu_device *adev = handle;
95         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
96         int ret = 0;
97
98         ret = hwmgr_sw_init(hwmgr);
99
100         pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");
101
102         return ret;
103 }
104
105 static int pp_sw_fini(void *handle)
106 {
107         struct amdgpu_device *adev = handle;
108         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
109
110         hwmgr_sw_fini(hwmgr);
111
112         release_firmware(adev->pm.fw);
113         adev->pm.fw = NULL;
114
115         return 0;
116 }
117
118 static int pp_hw_init(void *handle)
119 {
120         int ret = 0;
121         struct amdgpu_device *adev = handle;
122         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
123
124         ret = hwmgr_hw_init(hwmgr);
125
126         if (ret)
127                 pr_err("powerplay hw init failed\n");
128
129         return ret;
130 }
131
132 static int pp_hw_fini(void *handle)
133 {
134         struct amdgpu_device *adev = handle;
135         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
136
137         hwmgr_hw_fini(hwmgr);
138
139         return 0;
140 }
141
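/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes for the SMU's
 * private use and pass both its CPU and GPU addresses to the SMU through the
 * notify_cac_buffer_info callback.  If the SMU cannot be notified, the buffer
 * is freed again so no stale reservation is left behind.
 */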
142 static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
143 {
144         int r = -EINVAL;
145         void *cpu_ptr = NULL;
146         uint64_t gpu_addr;
147         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
148
149         if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
150                                                 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
151                                                 &adev->pm.smu_prv_buffer,
152                                                 &gpu_addr,
153                                                 &cpu_ptr)) {
154                 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
155                 return;
156         }
157
158         if (hwmgr->hwmgr_func->notify_cac_buffer_info)
159                 r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
160                                         lower_32_bits((unsigned long)cpu_ptr),
161                                         upper_32_bits((unsigned long)cpu_ptr),
162                                         lower_32_bits(gpu_addr),
163                                         upper_32_bits(gpu_addr),
164                                         adev->pm.smu_prv_buffer_size);
165
166         if (r) {
167                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
168                 adev->pm.smu_prv_buffer = NULL;
169                 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
170         }
171 }
172
173 static int pp_late_init(void *handle)
174 {
175         struct amdgpu_device *adev = handle;
176         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
177
178         if (hwmgr && hwmgr->pm_en) {
179                 mutex_lock(&hwmgr->smu_lock);
180                 hwmgr_handle_task(hwmgr,
181                                         AMD_PP_TASK_COMPLETE_INIT, NULL);
182                 mutex_unlock(&hwmgr->smu_lock);
183         }
184         if (adev->pm.smu_prv_buffer_size != 0)
185                 pp_reserve_vram_for_smu(adev);
186
187         return 0;
188 }
189
190 static void pp_late_fini(void *handle)
191 {
192         struct amdgpu_device *adev = handle;
193
194         if (adev->pm.smu_prv_buffer)
195                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
196         amd_powerplay_destroy(adev);
197 }
198
199
200 static bool pp_is_idle(void *handle)
201 {
202         return false;
203 }
204
205 static int pp_wait_for_idle(void *handle)
206 {
207         return 0;
208 }
209
210 static int pp_sw_reset(void *handle)
211 {
212         return 0;
213 }
214
215 static int pp_set_powergating_state(void *handle,
216                                     enum amd_powergating_state state)
217 {
218         return 0;
219 }
220
221 static int pp_suspend(void *handle)
222 {
223         struct amdgpu_device *adev = handle;
224         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
225
226         return hwmgr_suspend(hwmgr);
227 }
228
229 static int pp_resume(void *handle)
230 {
231         struct amdgpu_device *adev = handle;
232         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
233
234         return hwmgr_resume(hwmgr);
235 }
236
237 static int pp_set_clockgating_state(void *handle,
238                                           enum amd_clockgating_state state)
239 {
240         return 0;
241 }
242
243 static const struct amd_ip_funcs pp_ip_funcs = {
244         .name = "powerplay",
245         .early_init = pp_early_init,
246         .late_init = pp_late_init,
247         .sw_init = pp_sw_init,
248         .sw_fini = pp_sw_fini,
249         .hw_init = pp_hw_init,
250         .hw_fini = pp_hw_fini,
251         .late_fini = pp_late_fini,
252         .suspend = pp_suspend,
253         .resume = pp_resume,
254         .is_idle = pp_is_idle,
255         .wait_for_idle = pp_wait_for_idle,
256         .soft_reset = pp_sw_reset,
257         .set_clockgating_state = pp_set_clockgating_state,
258         .set_powergating_state = pp_set_powergating_state,
259 };
260
261 const struct amdgpu_ip_block_version pp_smu_ip_block =
262 {
263         .type = AMD_IP_BLOCK_TYPE_SMC,
264         .major = 1,
265         .minor = 0,
266         .rev = 0,
267         .funcs = &pp_ip_funcs,
268 };
269
270 /* This interface is only supported on VI (Volcanic Islands),
271  * because only smu7/8 can help to load the gfx/sdma firmware.
272  * The SMU needs to be enabled before the other IPs' firmware is loaded,
273  * so call start_smu here to load the smu7 firmware and then the other IPs' firmware.
274  */
275 static int pp_dpm_load_fw(void *handle)
276 {
277         struct pp_hwmgr *hwmgr = handle;
278
279         if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
280                 return -EINVAL;
281
282         if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
283                 pr_err("fw load failed\n");
284                 return -EINVAL;
285         }
286
287         return 0;
288 }
289
290 static int pp_dpm_fw_loading_complete(void *handle)
291 {
292         return 0;
293 }
294
295 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
296 {
297         struct pp_hwmgr *hwmgr = handle;
298
299         if (!hwmgr || !hwmgr->pm_en)
300                 return -EINVAL;
301
302         if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
303                 pr_info_ratelimited("%s was not implemented.\n", __func__);
304                 return 0;
305         }
306
307         return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
308 }
309
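/*
 * UMD pstate handling: the PROFILE_* forced levels are requested by user mode
 * drivers that need stable, known clocks (e.g. for profiling).  Entering one
 * of these levels saves the current dpm level and ungates GFX clock and power
 * gating so the chosen clocks stay fixed; leaving them re-enables GFX CG/PG,
 * with PROFILE_EXIT restoring the previously saved level.
 */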
310 static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
311                                                 enum amd_dpm_forced_level *level)
312 {
313         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
314                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
315                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
316                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
317
318         if (!(hwmgr->dpm_level & profile_mode_mask)) {
319                 /* enter umd pstate, save current level, disable gfx cg*/
320                 if (*level & profile_mode_mask) {
321                         hwmgr->saved_dpm_level = hwmgr->dpm_level;
322                         hwmgr->en_umd_pstate = true;
323                         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
324                                                 AMD_IP_BLOCK_TYPE_GFX,
325                                                 AMD_CG_STATE_UNGATE);
326                         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
327                                         AMD_IP_BLOCK_TYPE_GFX,
328                                         AMD_PG_STATE_UNGATE);
329                 }
330         } else {
331                 /* exit umd pstate, restore level, enable gfx cg*/
332                 if (!(*level & profile_mode_mask)) {
333                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
334                                 *level = hwmgr->saved_dpm_level;
335                         hwmgr->en_umd_pstate = false;
336                         amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
337                                         AMD_IP_BLOCK_TYPE_GFX,
338                                         AMD_CG_STATE_GATE);
339                         amdgpu_device_ip_set_powergating_state(hwmgr->adev,
340                                         AMD_IP_BLOCK_TYPE_GFX,
341                                         AMD_PG_STATE_GATE);
342                 }
343         }
344 }
345
346 static int pp_dpm_force_performance_level(void *handle,
347                                         enum amd_dpm_forced_level level)
348 {
349         struct pp_hwmgr *hwmgr = handle;
350
351         if (!hwmgr || !hwmgr->pm_en)
352                 return -EINVAL;
353
354         if (level == hwmgr->dpm_level)
355                 return 0;
356
357         mutex_lock(&hwmgr->smu_lock);
358         pp_dpm_en_umd_pstate(hwmgr, &level);
359         hwmgr->request_dpm_level = level;
360         hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
361         mutex_unlock(&hwmgr->smu_lock);
362
363         return 0;
364 }
365
366 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
367                                                                 void *handle)
368 {
369         struct pp_hwmgr *hwmgr = handle;
370         enum amd_dpm_forced_level level;
371
372         if (!hwmgr || !hwmgr->pm_en)
373                 return -EINVAL;
374
375         mutex_lock(&hwmgr->smu_lock);
376         level = hwmgr->dpm_level;
377         mutex_unlock(&hwmgr->smu_lock);
378         return level;
379 }
380
381 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
382 {
383         struct pp_hwmgr *hwmgr = handle;
384         uint32_t clk = 0;
385
386         if (!hwmgr || !hwmgr->pm_en)
387                 return 0;
388
389         if (hwmgr->hwmgr_func->get_sclk == NULL) {
390                 pr_info_ratelimited("%s was not implemented.\n", __func__);
391                 return 0;
392         }
393         mutex_lock(&hwmgr->smu_lock);
394         clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
395         mutex_unlock(&hwmgr->smu_lock);
396         return clk;
397 }
398
399 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
400 {
401         struct pp_hwmgr *hwmgr = handle;
402         uint32_t clk = 0;
403
404         if (!hwmgr || !hwmgr->pm_en)
405                 return 0;
406
407         if (hwmgr->hwmgr_func->get_mclk == NULL) {
408                 pr_info_ratelimited("%s was not implemented.\n", __func__);
409                 return 0;
410         }
411         mutex_lock(&hwmgr->smu_lock);
412         clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
413         mutex_unlock(&hwmgr->smu_lock);
414         return clk;
415 }
416
417 static void pp_dpm_powergate_vce(void *handle, bool gate)
418 {
419         struct pp_hwmgr *hwmgr = handle;
420
421         if (!hwmgr || !hwmgr->pm_en)
422                 return;
423
424         if (hwmgr->hwmgr_func->powergate_vce == NULL) {
425                 pr_info_ratelimited("%s was not implemented.\n", __func__);
426                 return;
427         }
428         mutex_lock(&hwmgr->smu_lock);
429         hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
430         mutex_unlock(&hwmgr->smu_lock);
431 }
432
433 static void pp_dpm_powergate_uvd(void *handle, bool gate)
434 {
435         struct pp_hwmgr *hwmgr = handle;
436
437         if (!hwmgr || !hwmgr->pm_en)
438                 return;
439
440         if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
441                 pr_info_ratelimited("%s was not implemented.\n", __func__);
442                 return;
443         }
444         mutex_lock(&hwmgr->smu_lock);
445         hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
446         mutex_unlock(&hwmgr->smu_lock);
447 }
448
449 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
450                 enum amd_pm_state_type *user_state)
451 {
452         int ret = 0;
453         struct pp_hwmgr *hwmgr = handle;
454
455         if (!hwmgr || !hwmgr->pm_en)
456                 return -EINVAL;
457
458         mutex_lock(&hwmgr->smu_lock);
459         ret = hwmgr_handle_task(hwmgr, task_id, user_state);
460         mutex_unlock(&hwmgr->smu_lock);
461
462         return ret;
463 }
464
465 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
466 {
467         struct pp_hwmgr *hwmgr = handle;
468         struct pp_power_state *state;
469         enum amd_pm_state_type pm_type;
470
471         if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
472                 return -EINVAL;
473
474         mutex_lock(&hwmgr->smu_lock);
475
476         state = hwmgr->current_ps;
477
478         switch (state->classification.ui_label) {
479         case PP_StateUILabel_Battery:
480                 pm_type = POWER_STATE_TYPE_BATTERY;
481                 break;
482         case PP_StateUILabel_Balanced:
483                 pm_type = POWER_STATE_TYPE_BALANCED;
484                 break;
485         case PP_StateUILabel_Performance:
486                 pm_type = POWER_STATE_TYPE_PERFORMANCE;
487                 break;
488         default:
489                 if (state->classification.flags & PP_StateClassificationFlag_Boot)
490                         pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
491                 else
492                         pm_type = POWER_STATE_TYPE_DEFAULT;
493                 break;
494         }
495         mutex_unlock(&hwmgr->smu_lock);
496
497         return pm_type;
498 }
499
500 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
501 {
502         struct pp_hwmgr *hwmgr = handle;
503
504         if (!hwmgr || !hwmgr->pm_en)
505                 return;
506
507         if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
508                 pr_info_ratelimited("%s was not implemented.\n", __func__);
509                 return;
510         }
511         mutex_lock(&hwmgr->smu_lock);
512         hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
513         mutex_unlock(&hwmgr->smu_lock);
514 }
515
516 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
517 {
518         struct pp_hwmgr *hwmgr = handle;
519         uint32_t mode = 0;
520
521         if (!hwmgr || !hwmgr->pm_en)
522                 return 0;
523
524         if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
525                 pr_info_ratelimited("%s was not implemented.\n", __func__);
526                 return 0;
527         }
528         mutex_lock(&hwmgr->smu_lock);
529         mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
530         mutex_unlock(&hwmgr->smu_lock);
531         return mode;
532 }
533
534 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
535 {
536         struct pp_hwmgr *hwmgr = handle;
537         int ret = 0;
538
539         if (!hwmgr || !hwmgr->pm_en)
540                 return -EINVAL;
541
542         if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
543                 pr_info_ratelimited("%s was not implemented.\n", __func__);
544                 return 0;
545         }
546         mutex_lock(&hwmgr->smu_lock);
547         ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
548         mutex_unlock(&hwmgr->smu_lock);
549         return ret;
550 }
551
552 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
553 {
554         struct pp_hwmgr *hwmgr = handle;
555         int ret = 0;
556
557         if (!hwmgr || !hwmgr->pm_en)
558                 return -EINVAL;
559
560         if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
561                 pr_info_ratelimited("%s was not implemented.\n", __func__);
562                 return 0;
563         }
564
565         mutex_lock(&hwmgr->smu_lock);
566         ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
567         mutex_unlock(&hwmgr->smu_lock);
568         return ret;
569 }
570
571 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
572 {
573         struct pp_hwmgr *hwmgr = handle;
574         int ret = 0;
575
576         if (!hwmgr || !hwmgr->pm_en)
577                 return -EINVAL;
578
579         if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
580                 return -EINVAL;
581
582         mutex_lock(&hwmgr->smu_lock);
583         ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
584         mutex_unlock(&hwmgr->smu_lock);
585         return ret;
586 }
587
588 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
589 {
590         struct pp_hwmgr *hwmgr = handle;
591         int ret = 0;
592
593         if (!hwmgr || !hwmgr->pm_en)
594                 return -EINVAL;
595
596         if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
597                 pr_info_ratelimited("%s was not implemented.\n", __func__);
598                 return 0;
599         }
600         mutex_lock(&hwmgr->smu_lock);
601         ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
602         mutex_unlock(&hwmgr->smu_lock);
603         return ret;
604 }
605
606 static int pp_dpm_get_pp_num_states(void *handle,
607                 struct pp_states_info *data)
608 {
609         struct pp_hwmgr *hwmgr = handle;
610         int i;
611
612         memset(data, 0, sizeof(*data));
613
614         if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
615                 return -EINVAL;
616
617         mutex_lock(&hwmgr->smu_lock);
618
619         data->nums = hwmgr->num_ps;
620
621         for (i = 0; i < hwmgr->num_ps; i++) {
622                 struct pp_power_state *state = (struct pp_power_state *)
623                                 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
624                 switch (state->classification.ui_label) {
625                 case PP_StateUILabel_Battery:
626                         data->states[i] = POWER_STATE_TYPE_BATTERY;
627                         break;
628                 case PP_StateUILabel_Balanced:
629                         data->states[i] = POWER_STATE_TYPE_BALANCED;
630                         break;
631                 case PP_StateUILabel_Performance:
632                         data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
633                         break;
634                 default:
635                         if (state->classification.flags & PP_StateClassificationFlag_Boot)
636                                 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
637                         else
638                                 data->states[i] = POWER_STATE_TYPE_DEFAULT;
639                 }
640         }
641         mutex_unlock(&hwmgr->smu_lock);
642         return 0;
643 }
644
645 static int pp_dpm_get_pp_table(void *handle, char **table)
646 {
647         struct pp_hwmgr *hwmgr = handle;
648         int size = 0;
649
650         if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
651                 return -EINVAL;
652
653         mutex_lock(&hwmgr->smu_lock);
654         *table = (char *)hwmgr->soft_pp_table;
655         size = hwmgr->soft_pp_table_size;
656         mutex_unlock(&hwmgr->smu_lock);
657         return size;
658 }
659
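/*
 * Tear down and re-initialize the hwmgr hardware state, then replay the
 * COMPLETE_INIT task.  pp_dpm_set_pp_table() below uses this to make a newly
 * supplied powerplay table take effect.
 */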
660 static int amd_powerplay_reset(void *handle)
661 {
662         struct pp_hwmgr *hwmgr = handle;
663         int ret;
664
665         ret = hwmgr_hw_fini(hwmgr);
666         if (ret)
667                 return ret;
668
669         ret = hwmgr_hw_init(hwmgr);
670         if (ret)
671                 return ret;
672
673         return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
674 }
675
676 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
677 {
678         struct pp_hwmgr *hwmgr = handle;
679         int ret = -ENOMEM;
680
681         if (!hwmgr || !hwmgr->pm_en)
682                 return -EINVAL;
683
684         mutex_lock(&hwmgr->smu_lock);
685         if (!hwmgr->hardcode_pp_table) {
686                 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
687                                                    hwmgr->soft_pp_table_size,
688                                                    GFP_KERNEL);
689                 if (!hwmgr->hardcode_pp_table)
690                         goto err;
691         }
692
693         memcpy(hwmgr->hardcode_pp_table, buf, size);
694
695         hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
696
697         ret = amd_powerplay_reset(handle);
698         if (ret)
699                 goto err;
700
701         if (hwmgr->hwmgr_func->avfs_control) {
702                 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
703                 if (ret)
704                         goto err;
705         }
706         mutex_unlock(&hwmgr->smu_lock);
707         return 0;
708 err:
709         mutex_unlock(&hwmgr->smu_lock);
710         return ret;
711 }
712
713 static int pp_dpm_force_clock_level(void *handle,
714                 enum pp_clock_type type, uint32_t mask)
715 {
716         struct pp_hwmgr *hwmgr = handle;
717         int ret = 0;
718
719         if (!hwmgr || !hwmgr->pm_en)
720                 return -EINVAL;
721
722         if (hwmgr->hwmgr_func->force_clock_level == NULL) {
723                 pr_info_ratelimited("%s was not implemented.\n", __func__);
724                 return 0;
725         }
726
727         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
728                 pr_debug("force clock level is for dpm manual mode only.\n");
729                 return -EINVAL;
730         }
731
732         mutex_lock(&hwmgr->smu_lock);
733         ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
734         mutex_unlock(&hwmgr->smu_lock);
735         return ret;
736 }
737
738 static int pp_dpm_print_clock_levels(void *handle,
739                 enum pp_clock_type type, char *buf)
740 {
741         struct pp_hwmgr *hwmgr = handle;
742         int ret = 0;
743
744         if (!hwmgr || !hwmgr->pm_en)
745                 return -EINVAL;
746
747         if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
748                 pr_info_ratelimited("%s was not implemented.\n", __func__);
749                 return 0;
750         }
751         mutex_lock(&hwmgr->smu_lock);
752         ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
753         mutex_unlock(&hwmgr->smu_lock);
754         return ret;
755 }
756
757 static int pp_dpm_get_sclk_od(void *handle)
758 {
759         struct pp_hwmgr *hwmgr = handle;
760         int ret = 0;
761
762         if (!hwmgr || !hwmgr->pm_en)
763                 return -EINVAL;
764
765         if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
766                 pr_info_ratelimited("%s was not implemented.\n", __func__);
767                 return 0;
768         }
769         mutex_lock(&hwmgr->smu_lock);
770         ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
771         mutex_unlock(&hwmgr->smu_lock);
772         return ret;
773 }
774
775 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
776 {
777         struct pp_hwmgr *hwmgr = handle;
778         int ret = 0;
779
780         if (!hwmgr || !hwmgr->pm_en)
781                 return -EINVAL;
782
783         if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
784                 pr_info_ratelimited("%s was not implemented.\n", __func__);
785                 return 0;
786         }
787
788         mutex_lock(&hwmgr->smu_lock);
789         ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
790         mutex_unlock(&hwmgr->smu_lock);
791         return ret;
792 }
793
794 static int pp_dpm_get_mclk_od(void *handle)
795 {
796         struct pp_hwmgr *hwmgr = handle;
797         int ret = 0;
798
799         if (!hwmgr || !hwmgr->pm_en)
800                 return -EINVAL;
801
802         if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
803                 pr_info_ratelimited("%s was not implemented.\n", __func__);
804                 return 0;
805         }
806         mutex_lock(&hwmgr->smu_lock);
807         ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
808         mutex_unlock(&hwmgr->smu_lock);
809         return ret;
810 }
811
812 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
813 {
814         struct pp_hwmgr *hwmgr = handle;
815         int ret = 0;
816
817         if (!hwmgr || !hwmgr->pm_en)
818                 return -EINVAL;
819
820         if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
821                 pr_info_ratelimited("%s was not implemented.\n", __func__);
822                 return 0;
823         }
824         mutex_lock(&hwmgr->smu_lock);
825         ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
826         mutex_unlock(&hwmgr->smu_lock);
827         return ret;
828 }
829
830 static int pp_dpm_read_sensor(void *handle, int idx,
831                               void *value, int *size)
832 {
833         struct pp_hwmgr *hwmgr = handle;
834         int ret = 0;
835
836         if (!hwmgr || !hwmgr->pm_en || !value)
837                 return -EINVAL;
838
839         switch (idx) {
840         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
841                 *((uint32_t *)value) = hwmgr->pstate_sclk;
842                 return 0;
843         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
844                 *((uint32_t *)value) = hwmgr->pstate_mclk;
845                 return 0;
846         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
847                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
848                 return 0;
849         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
850                 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
851                 return 0;
852         default:
853                 mutex_lock(&hwmgr->smu_lock);
854                 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
855                 mutex_unlock(&hwmgr->smu_lock);
856                 return ret;
857         }
858 }
859
860 static struct amd_vce_state*
861 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
862 {
863         struct pp_hwmgr *hwmgr = handle;
864
865         if (!hwmgr || !hwmgr->pm_en)
866                 return NULL;
867
868         if (idx < hwmgr->num_vce_state_tables)
869                 return &hwmgr->vce_states[idx];
870         return NULL;
871 }
872
873 static int pp_get_power_profile_mode(void *handle, char *buf)
874 {
875         struct pp_hwmgr *hwmgr = handle;
876
877         if (!hwmgr || !hwmgr->pm_en || !buf)
878                 return -EINVAL;
879
880         if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
881                 pr_info_ratelimited("%s was not implemented.\n", __func__);
882                 return snprintf(buf, PAGE_SIZE, "\n");
883         }
884
885         return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
886 }
887
888 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
889 {
890         struct pp_hwmgr *hwmgr = handle;
891         int ret = -EINVAL;
892
893         if (!hwmgr || !hwmgr->pm_en)
894                 return ret;
895
896         if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
897                 pr_info_ratelimited("%s was not implemented.\n", __func__);
898                 return ret;
899         }
900
901         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
902                 pr_debug("power profile setting is for manual dpm mode only.\n");
903                 return ret;
904         }
905
906         mutex_lock(&hwmgr->smu_lock);
907         ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
908         mutex_unlock(&hwmgr->smu_lock);
909         return ret;
910 }
911
912 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
913 {
914         struct pp_hwmgr *hwmgr = handle;
915
916         if (!hwmgr || !hwmgr->pm_en)
917                 return -EINVAL;
918
919         if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
920                 pr_info_ratelimited("%s was not implemented.\n", __func__);
921                 return -EINVAL;
922         }
923
924         return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
925 }
926
927 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
928 {
929         struct pp_hwmgr *hwmgr = handle;
930
931         if (!hwmgr || !hwmgr->pm_en)
932                 return -EINVAL;
933
934         if (hwmgr->hwmgr_func->set_mp1_state)
935                 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
936
937         return 0;
938 }
939
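/*
 * Enable or disable one of the SMC power profiles (3D, video, VR, compute,
 * ...).  Every profile owns one bit in workload_mask, placed according to its
 * priority; fls() then picks the highest-priority profile that is still
 * enabled, and its workload setting is applied unless the dpm level has been
 * forced to manual.  For the compute profile the hwmgr may additionally
 * disable power features that hurt compute performance, such as deep sleep
 * (DS) and ULV (see the commit subject above).
 */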
940 static int pp_dpm_switch_power_profile(void *handle,
941                 enum PP_SMC_POWER_PROFILE type, bool en)
942 {
943         struct pp_hwmgr *hwmgr = handle;
944         long workload;
945         uint32_t index;
946
947         if (!hwmgr || !hwmgr->pm_en)
948                 return -EINVAL;
949
950         if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
951                 pr_info_ratelimited("%s was not implemented.\n", __func__);
952                 return -EINVAL;
953         }
954
955         if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
956                 return -EINVAL;
957
958         mutex_lock(&hwmgr->smu_lock);
959
960         if (!en) {
961                 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
962                 index = fls(hwmgr->workload_mask);
963                 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
964                 workload = hwmgr->workload_setting[index];
965         } else {
966                 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
967                 index = fls(hwmgr->workload_mask);
968                 index = index <= Workload_Policy_Max ? index - 1 : 0;
969                 workload = hwmgr->workload_setting[index];
970         }
971
972         if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
973                 hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
974                         if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
975                                 mutex_unlock(&hwmgr->smu_lock);
976                                 return -EINVAL;
977                         }
978         }
979
980         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
981                 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
982         mutex_unlock(&hwmgr->smu_lock);
983
984         return 0;
985 }
986
987 static int pp_set_power_limit(void *handle, uint32_t limit)
988 {
989         struct pp_hwmgr *hwmgr = handle;
990         uint32_t max_power_limit;
991
992         if (!hwmgr || !hwmgr->pm_en)
993                 return -EINVAL;
994
995         if (hwmgr->hwmgr_func->set_power_limit == NULL) {
996                 pr_info_ratelimited("%s was not implemented.\n", __func__);
997                 return -EINVAL;
998         }
999
1000         if (limit == 0)
1001                 limit = hwmgr->default_power_limit;
1002
1003         max_power_limit = hwmgr->default_power_limit;
1004         if (hwmgr->od_enabled) {
1005                 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1006                 max_power_limit /= 100;
1007         }
1008
1009         if (limit > max_power_limit)
1010                 return -EINVAL;
1011
1012         mutex_lock(&hwmgr->smu_lock);
1013         hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1014         hwmgr->power_limit = limit;
1015         mutex_unlock(&hwmgr->smu_lock);
1016         return 0;
1017 }
1018
1019 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1020 {
1021         struct pp_hwmgr *hwmgr = handle;
1022
1023         if (!hwmgr || !hwmgr->pm_en || !limit)
1024                 return -EINVAL;
1025
1026         mutex_lock(&hwmgr->smu_lock);
1027
1028         if (default_limit) {
1029                 *limit = hwmgr->default_power_limit;
1030                 if (hwmgr->od_enabled) {
1031                         *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1032                         *limit /= 100;
1033                 }
1034         }
1035         else
1036                 *limit = hwmgr->power_limit;
1037
1038         mutex_unlock(&hwmgr->smu_lock);
1039
1040         return 0;
1041 }
1042
1043 static int pp_display_configuration_change(void *handle,
1044         const struct amd_pp_display_configuration *display_config)
1045 {
1046         struct pp_hwmgr *hwmgr = handle;
1047
1048         if (!hwmgr || !hwmgr->pm_en)
1049                 return -EINVAL;
1050
1051         mutex_lock(&hwmgr->smu_lock);
1052         phm_store_dal_configuration_data(hwmgr, display_config);
1053         mutex_unlock(&hwmgr->smu_lock);
1054         return 0;
1055 }
1056
1057 static int pp_get_display_power_level(void *handle,
1058                 struct amd_pp_simple_clock_info *output)
1059 {
1060         struct pp_hwmgr *hwmgr = handle;
1061         int ret = 0;
1062
1063         if (!hwmgr || !hwmgr->pm_en || !output)
1064                 return -EINVAL;
1065
1066         mutex_lock(&hwmgr->smu_lock);
1067         ret = phm_get_dal_power_level(hwmgr, output);
1068         mutex_unlock(&hwmgr->smu_lock);
1069         return ret;
1070 }
1071
1072 static int pp_get_current_clocks(void *handle,
1073                 struct amd_pp_clock_info *clocks)
1074 {
1075         struct amd_pp_simple_clock_info simple_clocks = { 0 };
1076         struct pp_clock_info hw_clocks;
1077         struct pp_hwmgr *hwmgr = handle;
1078         int ret = 0;
1079
1080         if (!hwmgr || !hwmgr->pm_en)
1081                 return -EINVAL;
1082
1083         mutex_lock(&hwmgr->smu_lock);
1084
1085         phm_get_dal_power_level(hwmgr, &simple_clocks);
1086
1087         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1088                                         PHM_PlatformCaps_PowerContainment))
1089                 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1090                                         &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1091         else
1092                 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1093                                         &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1094
1095         if (ret) {
1096                 pr_debug("Error in phm_get_clock_info \n");
1097                 mutex_unlock(&hwmgr->smu_lock);
1098                 return -EINVAL;
1099         }
1100
1101         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1102         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1103         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1104         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1105         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1106         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1107
1108         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1109         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1110
1111         if (simple_clocks.level == 0)
1112                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1113         else
1114                 clocks->max_clocks_state = simple_clocks.level;
1115
1116         if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1117                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1118                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1119         }
1120         mutex_unlock(&hwmgr->smu_lock);
1121         return 0;
1122 }
1123
1124 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1125 {
1126         struct pp_hwmgr *hwmgr = handle;
1127         int ret = 0;
1128
1129         if (!hwmgr || !hwmgr->pm_en)
1130                 return -EINVAL;
1131
1132         if (clocks == NULL)
1133                 return -EINVAL;
1134
1135         mutex_lock(&hwmgr->smu_lock);
1136         ret = phm_get_clock_by_type(hwmgr, type, clocks);
1137         mutex_unlock(&hwmgr->smu_lock);
1138         return ret;
1139 }
1140
1141 static int pp_get_clock_by_type_with_latency(void *handle,
1142                 enum amd_pp_clock_type type,
1143                 struct pp_clock_levels_with_latency *clocks)
1144 {
1145         struct pp_hwmgr *hwmgr = handle;
1146         int ret = 0;
1147
1148         if (!hwmgr || !hwmgr->pm_en || !clocks)
1149                 return -EINVAL;
1150
1151         mutex_lock(&hwmgr->smu_lock);
1152         ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1153         mutex_unlock(&hwmgr->smu_lock);
1154         return ret;
1155 }
1156
1157 static int pp_get_clock_by_type_with_voltage(void *handle,
1158                 enum amd_pp_clock_type type,
1159                 struct pp_clock_levels_with_voltage *clocks)
1160 {
1161         struct pp_hwmgr *hwmgr = handle;
1162         int ret = 0;
1163
1164         if (!hwmgr || !hwmgr->pm_en || !clocks)
1165                 return -EINVAL;
1166
1167         mutex_lock(&hwmgr->smu_lock);
1168
1169         ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1170
1171         mutex_unlock(&hwmgr->smu_lock);
1172         return ret;
1173 }
1174
1175 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1176                 void *clock_ranges)
1177 {
1178         struct pp_hwmgr *hwmgr = handle;
1179         int ret = 0;
1180
1181         if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1182                 return -EINVAL;
1183
1184         mutex_lock(&hwmgr->smu_lock);
1185         ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1186                         clock_ranges);
1187         mutex_unlock(&hwmgr->smu_lock);
1188
1189         return ret;
1190 }
1191
1192 static int pp_display_clock_voltage_request(void *handle,
1193                 struct pp_display_clock_request *clock)
1194 {
1195         struct pp_hwmgr *hwmgr = handle;
1196         int ret = 0;
1197
1198         if (!hwmgr || !hwmgr->pm_en || !clock)
1199                 return -EINVAL;
1200
1201         mutex_lock(&hwmgr->smu_lock);
1202         ret = phm_display_clock_voltage_request(hwmgr, clock);
1203         mutex_unlock(&hwmgr->smu_lock);
1204
1205         return ret;
1206 }
1207
1208 static int pp_get_display_mode_validation_clocks(void *handle,
1209                 struct amd_pp_simple_clock_info *clocks)
1210 {
1211         struct pp_hwmgr *hwmgr = handle;
1212         int ret = 0;
1213
1214         if (!hwmgr || !hwmgr->pm_en || !clocks)
1215                 return -EINVAL;
1216
1217         clocks->level = PP_DAL_POWERLEVEL_7;
1218
1219         mutex_lock(&hwmgr->smu_lock);
1220
1221         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1222                 ret = phm_get_max_high_clocks(hwmgr, clocks);
1223
1224         mutex_unlock(&hwmgr->smu_lock);
1225         return ret;
1226 }
1227
1228 static int pp_dpm_powergate_mmhub(void *handle)
1229 {
1230         struct pp_hwmgr *hwmgr = handle;
1231
1232         if (!hwmgr || !hwmgr->pm_en)
1233                 return -EINVAL;
1234
1235         if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1236                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1237                 return 0;
1238         }
1239
1240         return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1241 }
1242
1243 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1244 {
1245         struct pp_hwmgr *hwmgr = handle;
1246
1247         if (!hwmgr || !hwmgr->pm_en)
1248                 return 0;
1249
1250         if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1251                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1252                 return 0;
1253         }
1254
1255         return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1256 }
1257
1258 static void pp_dpm_powergate_acp(void *handle, bool gate)
1259 {
1260         struct pp_hwmgr *hwmgr = handle;
1261
1262         if (!hwmgr || !hwmgr->pm_en)
1263                 return;
1264
1265         if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1266                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1267                 return;
1268         }
1269
1270         hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1271 }
1272
1273 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1274 {
1275         struct pp_hwmgr *hwmgr = handle;
1276
1277         if (!hwmgr)
1278                 return;
1279
1280         if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1281                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1282                 return;
1283         }
1284
1285         hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1286 }
1287
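/*
 * Common entry point used by amdgpu to powergate or ungate an IP block via
 * the SMU; it simply dispatches to the per-block helpers above.
 */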
1288 static int pp_set_powergating_by_smu(void *handle,
1289                                 uint32_t block_type, bool gate)
1290 {
1291         int ret = 0;
1292
1293         switch (block_type) {
1294         case AMD_IP_BLOCK_TYPE_UVD:
1295         case AMD_IP_BLOCK_TYPE_VCN:
1296                 pp_dpm_powergate_uvd(handle, gate);
1297                 break;
1298         case AMD_IP_BLOCK_TYPE_VCE:
1299                 pp_dpm_powergate_vce(handle, gate);
1300                 break;
1301         case AMD_IP_BLOCK_TYPE_GMC:
1302                 pp_dpm_powergate_mmhub(handle);
1303                 break;
1304         case AMD_IP_BLOCK_TYPE_GFX:
1305                 ret = pp_dpm_powergate_gfx(handle, gate);
1306                 break;
1307         case AMD_IP_BLOCK_TYPE_ACP:
1308                 pp_dpm_powergate_acp(handle, gate);
1309                 break;
1310         case AMD_IP_BLOCK_TYPE_SDMA:
1311                 pp_dpm_powergate_sdma(handle, gate);
1312                 break;
1313         default:
1314                 break;
1315         }
1316         return ret;
1317 }
1318
1319 static int pp_notify_smu_enable_pwe(void *handle)
1320 {
1321         struct pp_hwmgr *hwmgr = handle;
1322
1323         if (!hwmgr || !hwmgr->pm_en)
1324                 return -EINVAL;
1325
1326         if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1327                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1328                 return -EINVAL;
1329         }
1330
1331         mutex_lock(&hwmgr->smu_lock);
1332         hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1333         mutex_unlock(&hwmgr->smu_lock);
1334
1335         return 0;
1336 }
1337
1338 static int pp_enable_mgpu_fan_boost(void *handle)
1339 {
1340         struct pp_hwmgr *hwmgr = handle;
1341
1342         if (!hwmgr)
1343                 return -EINVAL;
1344
1345         if (!hwmgr->pm_en ||
1346              hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1347                 return 0;
1348
1349         mutex_lock(&hwmgr->smu_lock);
1350         hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1351         mutex_unlock(&hwmgr->smu_lock);
1352
1353         return 0;
1354 }
1355
1356 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1357 {
1358         struct pp_hwmgr *hwmgr = handle;
1359
1360         if (!hwmgr || !hwmgr->pm_en)
1361                 return -EINVAL;
1362
1363         if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1364                 pr_debug("%s was not implemented.\n", __func__);
1365                 return -EINVAL;
1366         }
1367
1368         mutex_lock(&hwmgr->smu_lock);
1369         hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1370         mutex_unlock(&hwmgr->smu_lock);
1371
1372         return 0;
1373 }
1374
1375 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1376 {
1377         struct pp_hwmgr *hwmgr = handle;
1378
1379         if (!hwmgr || !hwmgr->pm_en)
1380                 return -EINVAL;
1381
1382         if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1383                 pr_debug("%s was not implemented.\n", __func__);
1384                 return -EINVAL;
1385         }
1386
1387         mutex_lock(&hwmgr->smu_lock);
1388         hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1389         mutex_unlock(&hwmgr->smu_lock);
1390
1391         return 0;
1392 }
1393
1394 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1395 {
1396         struct pp_hwmgr *hwmgr = handle;
1397
1398         if (!hwmgr || !hwmgr->pm_en)
1399                 return -EINVAL;
1400
1401         if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1402                 pr_debug("%s was not implemented.\n", __func__);
1403                 return -EINVAL;
1404         }
1405
1406         mutex_lock(&hwmgr->smu_lock);
1407         hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1408         mutex_unlock(&hwmgr->smu_lock);
1409
1410         return 0;
1411 }
1412
1413 static int pp_set_active_display_count(void *handle, uint32_t count)
1414 {
1415         struct pp_hwmgr *hwmgr = handle;
1416         int ret = 0;
1417
1418         if (!hwmgr || !hwmgr->pm_en)
1419                 return -EINVAL;
1420
1421         mutex_lock(&hwmgr->smu_lock);
1422         ret = phm_set_active_display_count(hwmgr, count);
1423         mutex_unlock(&hwmgr->smu_lock);
1424
1425         return ret;
1426 }
1427
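/*
 * BACO (Bus Active, Chip Off) helpers: report whether the ASIC supports BACO,
 * query the current BACO state and enter/exit it.  All of them quietly return
 * success when power management is disabled or the hwmgr does not provide the
 * corresponding callback.
 */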
1428 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1429 {
1430         struct pp_hwmgr *hwmgr = handle;
1431
1432         *cap = false;
1433         if (!hwmgr)
1434                 return -EINVAL;
1435
1436         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
1437                 return 0;
1438
1439         mutex_lock(&hwmgr->smu_lock);
1440         hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1441         mutex_unlock(&hwmgr->smu_lock);
1442
1443         return 0;
1444 }
1445
1446 static int pp_get_asic_baco_state(void *handle, int *state)
1447 {
1448         struct pp_hwmgr *hwmgr = handle;
1449
1450         if (!hwmgr)
1451                 return -EINVAL;
1452
1453         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1454                 return 0;
1455
1456         mutex_lock(&hwmgr->smu_lock);
1457         hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1458         mutex_unlock(&hwmgr->smu_lock);
1459
1460         return 0;
1461 }
1462
1463 static int pp_set_asic_baco_state(void *handle, int state)
1464 {
1465         struct pp_hwmgr *hwmgr = handle;
1466
1467         if (!hwmgr)
1468                 return -EINVAL;
1469
1470         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1471                 return 0;
1472
1473         mutex_lock(&hwmgr->smu_lock);
1474         hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1475         mutex_unlock(&hwmgr->smu_lock);
1476
1477         return 0;
1478 }
1479
1480 static int pp_get_ppfeature_status(void *handle, char *buf)
1481 {
1482         struct pp_hwmgr *hwmgr = handle;
1483         int ret = 0;
1484
1485         if (!hwmgr || !hwmgr->pm_en || !buf)
1486                 return -EINVAL;
1487
1488         if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1489                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1490                 return -EINVAL;
1491         }
1492
1493         mutex_lock(&hwmgr->smu_lock);
1494         ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1495         mutex_unlock(&hwmgr->smu_lock);
1496
1497         return ret;
1498 }
1499
1500 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1501 {
1502         struct pp_hwmgr *hwmgr = handle;
1503         int ret = 0;
1504
1505         if (!hwmgr || !hwmgr->pm_en)
1506                 return -EINVAL;
1507
1508         if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1509                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1510                 return -EINVAL;
1511         }
1512
1513         mutex_lock(&hwmgr->smu_lock);
1514         ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1515         mutex_unlock(&hwmgr->smu_lock);
1516
1517         return ret;
1518 }
1519
1520 static int pp_asic_reset_mode_2(void *handle)
1521 {
1522         struct pp_hwmgr *hwmgr = handle;
1523         int ret = 0;
1524
1525         if (!hwmgr || !hwmgr->pm_en)
1526                 return -EINVAL;
1527
1528         if (hwmgr->hwmgr_func->asic_reset == NULL) {
1529                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1530                 return -EINVAL;
1531         }
1532
1533         mutex_lock(&hwmgr->smu_lock);
1534         ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1535         mutex_unlock(&hwmgr->smu_lock);
1536
1537         return ret;
1538 }
1539
1540 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1541 {
1542         struct pp_hwmgr *hwmgr = handle;
1543         int ret = 0;
1544
1545         if (!hwmgr || !hwmgr->pm_en)
1546                 return -EINVAL;
1547
1548         if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1549                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1550                 return -EINVAL;
1551         }
1552
1553         mutex_lock(&hwmgr->smu_lock);
1554         ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1555         mutex_unlock(&hwmgr->smu_lock);
1556
1557         return ret;
1558 }
1559
1560 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1561 {
1562         struct pp_hwmgr *hwmgr = handle;
1563
1564         if (!hwmgr)
1565                 return -EINVAL;
1566
1567         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1568                 return 0;
1569
1570         mutex_lock(&hwmgr->smu_lock);
1571         hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1572         mutex_unlock(&hwmgr->smu_lock);
1573
1574         return 0;
1575 }
1576
1577 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1578 {
1579         struct pp_hwmgr *hwmgr = handle;
1580
1581         if (!hwmgr)
1582                 return -EINVAL;
1583
1584         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1585                 return 0;
1586
1587         mutex_lock(&hwmgr->smu_lock);
1588         hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1589         mutex_unlock(&hwmgr->smu_lock);
1590
1591         return 0;
1592 }
1593
1594 static const struct amd_pm_funcs pp_dpm_funcs = {
1595         .load_firmware = pp_dpm_load_fw,
1596         .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1597         .force_performance_level = pp_dpm_force_performance_level,
1598         .get_performance_level = pp_dpm_get_performance_level,
1599         .get_current_power_state = pp_dpm_get_current_power_state,
1600         .dispatch_tasks = pp_dpm_dispatch_tasks,
1601         .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1602         .get_fan_control_mode = pp_dpm_get_fan_control_mode,
1603         .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1604         .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1605         .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1606         .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1607         .get_pp_num_states = pp_dpm_get_pp_num_states,
1608         .get_pp_table = pp_dpm_get_pp_table,
1609         .set_pp_table = pp_dpm_set_pp_table,
1610         .force_clock_level = pp_dpm_force_clock_level,
1611         .print_clock_levels = pp_dpm_print_clock_levels,
1612         .get_sclk_od = pp_dpm_get_sclk_od,
1613         .set_sclk_od = pp_dpm_set_sclk_od,
1614         .get_mclk_od = pp_dpm_get_mclk_od,
1615         .set_mclk_od = pp_dpm_set_mclk_od,
1616         .read_sensor = pp_dpm_read_sensor,
1617         .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1618         .switch_power_profile = pp_dpm_switch_power_profile,
1619         .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1620         .set_powergating_by_smu = pp_set_powergating_by_smu,
1621         .get_power_profile_mode = pp_get_power_profile_mode,
1622         .set_power_profile_mode = pp_set_power_profile_mode,
1623         .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1624         .set_mp1_state = pp_dpm_set_mp1_state,
1625         .set_power_limit = pp_set_power_limit,
1626         .get_power_limit = pp_get_power_limit,
1627 /* export to DC */
1628         .get_sclk = pp_dpm_get_sclk,
1629         .get_mclk = pp_dpm_get_mclk,
1630         .display_configuration_change = pp_display_configuration_change,
1631         .get_display_power_level = pp_get_display_power_level,
1632         .get_current_clocks = pp_get_current_clocks,
1633         .get_clock_by_type = pp_get_clock_by_type,
1634         .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1635         .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1636         .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1637         .display_clock_voltage_request = pp_display_clock_voltage_request,
1638         .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1639         .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1640         .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1641         .set_active_display_count = pp_set_active_display_count,
1642         .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1643         .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1644         .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1645         .get_asic_baco_capability = pp_get_asic_baco_capability,
1646         .get_asic_baco_state = pp_get_asic_baco_state,
1647         .set_asic_baco_state = pp_set_asic_baco_state,
1648         .get_ppfeature_status = pp_get_ppfeature_status,
1649         .set_ppfeature_status = pp_set_ppfeature_status,
1650         .asic_reset_mode_2 = pp_asic_reset_mode_2,
1651         .smu_i2c_bus_access = pp_smu_i2c_bus_access,
1652         .set_df_cstate = pp_set_df_cstate,
1653         .set_xgmi_pstate = pp_set_xgmi_pstate,
1654 };