drm/amd/powerplay: add socclk profile dpm support.
linux.git: drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "atom.h"
31 #include "amd_pcie.h"
32
33 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
34 {
35         int ret = 0;
36
37         if (!if_version && !smu_version)
38                 return -EINVAL;
39
40         if (if_version) {
41                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
42                 if (ret)
43                         return ret;
44
45                 ret = smu_read_smc_arg(smu, if_version);
46                 if (ret)
47                         return ret;
48         }
49
50         if (smu_version) {
51                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
52                 if (ret)
53                         return ret;
54
55                 ret = smu_read_smc_arg(smu, smu_version);
56                 if (ret)
57                         return ret;
58         }
59
60         return ret;
61 }
62
63 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
64                             uint32_t min, uint32_t max)
65 {
66         int ret = 0, clk_id = 0;
67         uint32_t param;
68
69         if (min <= 0 && max <= 0)
70                 return -EINVAL;
71
72         clk_id = smu_clk_get_index(smu, clk_type);
73         if (clk_id < 0)
74                 return clk_id;
75
76         if (max > 0) {
77                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
78                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
79                                                   param);
80                 if (ret)
81                         return ret;
82         }
83
84         if (min > 0) {
85                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
86                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
87                                                   param);
88                 if (ret)
89                         return ret;
90         }
91
92
93         return ret;
94 }
95
96 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
97                             uint32_t min, uint32_t max)
98 {
99         int ret = 0, clk_id = 0;
100         uint32_t param;
101
102         if (min <= 0 && max <= 0)
103                 return -EINVAL;
104
105         clk_id = smu_clk_get_index(smu, clk_type);
106         if (clk_id < 0)
107                 return clk_id;
108
109         if (max > 0) {
110                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
111                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
112                                                   param);
113                 if (ret)
114                         return ret;
115         }
116
117         if (min > 0) {
118                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
119                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
120                                                   param);
121                 if (ret)
122                         return ret;
123         }
124
125
126         return ret;
127 }
128
129 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
130                            uint32_t *min, uint32_t *max)
131 {
132         int ret = 0, clk_id = 0;
133         uint32_t param = 0;
134
135         if (!min && !max)
136                 return -EINVAL;
137
138         switch (clk_type) {
139         case SMU_MCLK:
140         case SMU_UCLK:
141                 if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
142                         pr_warn("uclk dpm is not enabled\n");
143                         return 0;
144                 }
145                 break;
146         case SMU_GFXCLK:
147         case SMU_SCLK:
148                 if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
149                         pr_warn("gfxclk dpm is not enabled\n");
150                         return 0;
151                 }
                    break;
152         case SMU_SOCCLK:
153                 if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
154                         pr_warn("socclk dpm is not enabled\n");
155                         return 0;
156                 }
157                 break;
158         default:
159                 break;
160         }
161
162         mutex_lock(&smu->mutex);
163         clk_id = smu_clk_get_index(smu, clk_type);
164         if (clk_id < 0) {
165                 ret = -EINVAL;
166                 goto failed;
167         }
168
169         param = (clk_id & 0xffff) << 16;
170
171         if (max) {
172                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
173                 if (ret)
174                         goto failed;
175                 ret = smu_read_smc_arg(smu, max);
176                 if (ret)
177                         goto failed;
178         }
179
180         if (min) {
181                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
182                 if (ret)
183                         goto failed;
184                 ret = smu_read_smc_arg(smu, min);
185                 if (ret)
186                         goto failed;
187         }
188
189 failed:
190         mutex_unlock(&smu->mutex);
191         return ret;
192 }
193
194 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
195                               uint16_t level, uint32_t *value)
196 {
197         int ret = 0, clk_id = 0;
198         uint32_t param;
199
200         if (!value)
201                 return -EINVAL;
202
203         clk_id = smu_clk_get_index(smu, clk_type);
204         if (clk_id < 0)
205                 return clk_id;
206
207         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
208
209         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
210                                           param);
211         if (ret)
212                 return ret;
213
214         ret = smu_read_smc_arg(smu, &param);
215         if (ret)
216                 return ret;
217
218         /* BIT31:  0 - Fine grained DPM, 1 - Discrete DPM
219          * not supported for now, so mask the bit off */
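            /*
             * Example (illustrative): param = 0x80000320 has BIT31 set
             * (discrete DPM) and masks down to a value of 0x320 (800 decimal).
             */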
220         *value = param & 0x7fffffff;
221
222         return ret;
223 }
224
225 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
226                             uint32_t *value)
227 {
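            /*
             * Note: a level index of 0xff asks GetDpmFreqByIndex for the number
             * of DPM levels rather than the frequency of a specific level
             * (assumption based on the SMU11 message convention).
             */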
228         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
229 }
230
231 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
232                            bool gate)
233 {
234         int ret = 0;
235
236         switch (block_type) {
237         case AMD_IP_BLOCK_TYPE_UVD:
238                 ret = smu_dpm_set_uvd_enable(smu, gate);
239                 break;
240         case AMD_IP_BLOCK_TYPE_VCE:
241                 ret = smu_dpm_set_vce_enable(smu, gate);
242                 break;
243         case AMD_IP_BLOCK_TYPE_GFX:
244                 ret = smu_gfx_off_control(smu, gate);
245                 break;
246         default:
247                 break;
248         }
249
250         return ret;
251 }
252
253 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
254 {
255         /* power states are not supported */
256         return POWER_STATE_TYPE_DEFAULT;
257 }
258
259 int smu_get_power_num_states(struct smu_context *smu,
260                              struct pp_states_info *state_info)
261 {
262         if (!state_info)
263                 return -EINVAL;
264
265         /* power states are not supported */
266         memset(state_info, 0, sizeof(struct pp_states_info));
267         state_info->nums = 0;
268
269         return 0;
270 }
271
272 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
273                            void *data, uint32_t *size)
274 {
275         int ret = 0;
276
277         switch (sensor) {
278         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
279                 *((uint32_t *)data) = smu->pstate_sclk;
280                 *size = 4;
281                 break;
282         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
283                 *((uint32_t *)data) = smu->pstate_mclk;
284                 *size = 4;
285                 break;
286         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
287                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
288                 *size = 8;
289                 break;
290         case AMDGPU_PP_SENSOR_UVD_POWER:
291                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
292                 *size = 4;
293                 break;
294         case AMDGPU_PP_SENSOR_VCE_POWER:
295                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
296                 *size = 4;
297                 break;
298         default:
299                 ret = -EINVAL;
300                 break;
301         }
302
303         if (ret)
304                 *size = 0;
305
306         return ret;
307 }
308
309 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
310                      void *table_data, bool drv2smu)
311 {
312         struct smu_table_context *smu_table = &smu->smu_table;
313         struct smu_table *table = NULL;
314         int ret = 0;
315         int table_id = smu_table_get_index(smu, table_index);
316
317         if (!table_data || table_id >= smu_table->table_count || table_id < 0)
318                 return -EINVAL;
319
320         table = &smu_table->tables[table_index];
321
322         if (drv2smu)
323                 memcpy(table->cpu_addr, table_data, table->size);
324
325         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
326                                           upper_32_bits(table->mc_address));
327         if (ret)
328                 return ret;
329         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
330                                           lower_32_bits(table->mc_address));
331         if (ret)
332                 return ret;
333         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
334                                           SMU_MSG_TransferTableDram2Smu :
335                                           SMU_MSG_TransferTableSmu2Dram,
336                                           table_id);
337         if (ret)
338                 return ret;
339
340         if (!drv2smu)
341                 memcpy(table_data, table->cpu_addr, table->size);
342
343         return ret;
344 }
345
346 bool is_support_sw_smu(struct amdgpu_device *adev)
347 {
348         if (adev->asic_type == CHIP_VEGA20)
349                 return amdgpu_dpm == 2;
350         else if (adev->asic_type >= CHIP_NAVI10)
351                 return true;
352         else
353                 return false;
354 }
355
356 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
357 {
358         struct smu_table_context *smu_table = &smu->smu_table;
359
360         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
361                 return -EINVAL;
362
363         if (smu_table->hardcode_pptable)
364                 *table = smu_table->hardcode_pptable;
365         else
366                 *table = smu_table->power_play_table;
367
368         return smu_table->power_play_table_size;
369 }
370
371 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
372 {
373         struct smu_table_context *smu_table = &smu->smu_table;
374         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
375         int ret = 0;
376
377         if (!smu->pm_enabled)
378                 return -EINVAL;
379         if (header->usStructureSize != size) {
380                 pr_err("pp table size does not match!\n");
381                 return -EIO;
382         }
383
384         mutex_lock(&smu->mutex);
385         if (!smu_table->hardcode_pptable)
386                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
387         if (!smu_table->hardcode_pptable) {
388                 ret = -ENOMEM;
389                 goto failed;
390         }
391
392         memcpy(smu_table->hardcode_pptable, buf, size);
393         smu_table->power_play_table = smu_table->hardcode_pptable;
394         smu_table->power_play_table_size = size;
395         mutex_unlock(&smu->mutex);
396
397         ret = smu_reset(smu);
398         if (ret)
399                 pr_info("smu reset failed, ret = %d\n", ret);
400
401         return ret;
402
403 failed:
404         mutex_unlock(&smu->mutex);
405         return ret;
406 }
407
408 int smu_feature_init_dpm(struct smu_context *smu)
409 {
410         struct smu_feature *feature = &smu->smu_feature;
411         int ret = 0;
412         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
413
414         if (!smu->pm_enabled)
415                 return ret;
416         mutex_lock(&feature->mutex);
417         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
418         mutex_unlock(&feature->mutex);
419
420         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
421                                              SMU_FEATURE_MAX/32);
422         if (ret)
423                 return ret;
424
425         mutex_lock(&feature->mutex);
426         bitmap_or(feature->allowed, feature->allowed,
427                       (unsigned long *)allowed_feature_mask,
428                       feature->feature_num);
429         mutex_unlock(&feature->mutex);
430
431         return ret;
432 }
433
434 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
435 {
436         struct smu_feature *feature = &smu->smu_feature;
437         uint32_t feature_id;
438         int ret = 0;
439
440         feature_id = smu_feature_get_index(smu, mask);
441
442         WARN_ON(feature_id > feature->feature_num);
443
444         mutex_lock(&feature->mutex);
445         ret = test_bit(feature_id, feature->enabled);
446         mutex_unlock(&feature->mutex);
447
448         return ret;
449 }
450
451 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
452                             bool enable)
453 {
454         struct smu_feature *feature = &smu->smu_feature;
455         uint32_t feature_id;
456         int ret = 0;
457
458         feature_id = smu_feature_get_index(smu, mask);
459
460         WARN_ON(feature_id > feature->feature_num);
461
462         mutex_lock(&feature->mutex);
463         ret = smu_feature_update_enable_state(smu, feature_id, enable);
464         if (ret)
465                 goto failed;
466
467         if (enable)
468                 test_and_set_bit(feature_id, feature->enabled);
469         else
470                 test_and_clear_bit(feature_id, feature->enabled);
471
472 failed:
473         mutex_unlock(&feature->mutex);
474
475         return ret;
476 }
477
478 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
479 {
480         struct smu_feature *feature = &smu->smu_feature;
481         uint32_t feature_id;
482         int ret = 0;
483
484         feature_id = smu_feature_get_index(smu, mask);
485
486         WARN_ON(feature_id > feature->feature_num);
487
488         mutex_lock(&feature->mutex);
489         ret = test_bit(feature_id, feature->supported);
490         mutex_unlock(&feature->mutex);
491
492         return ret;
493 }
494
495 int smu_feature_set_supported(struct smu_context *smu,
496                               enum smu_feature_mask mask,
497                               bool enable)
498 {
499         struct smu_feature *feature = &smu->smu_feature;
500         uint32_t feature_id;
501         int ret = 0;
502
503         feature_id = smu_feature_get_index(smu, mask);
504
505         WARN_ON(feature_id > feature->feature_num);
506
507         mutex_lock(&feature->mutex);
508         if (enable)
509                 test_and_set_bit(feature_id, feature->supported);
510         else
511                 test_and_clear_bit(feature_id, feature->supported);
512         mutex_unlock(&feature->mutex);
513
514         return ret;
515 }
516
517 static int smu_set_funcs(struct amdgpu_device *adev)
518 {
519         struct smu_context *smu = &adev->smu;
520
521         switch (adev->asic_type) {
522         case CHIP_VEGA20:
523         case CHIP_NAVI10:
524                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
525                         smu->od_enabled = true;
526                 smu_v11_0_set_smu_funcs(smu);
527                 break;
528         default:
529                 return -EINVAL;
530         }
531
532         return 0;
533 }
534
535 static int smu_early_init(void *handle)
536 {
537         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
538         struct smu_context *smu = &adev->smu;
539
540         smu->adev = adev;
541         smu->pm_enabled = !!amdgpu_dpm;
542         mutex_init(&smu->mutex);
543
544         return smu_set_funcs(adev);
545 }
546
547 static int smu_late_init(void *handle)
548 {
549         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
550         struct smu_context *smu = &adev->smu;
551
552         if (!smu->pm_enabled)
553                 return 0;
554         mutex_lock(&smu->mutex);
555         smu_handle_task(&adev->smu,
556                         smu->smu_dpm.dpm_level,
557                         AMD_PP_TASK_COMPLETE_INIT);
558         mutex_unlock(&smu->mutex);
559
560         return 0;
561 }
562
563 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
564                             uint16_t *size, uint8_t *frev, uint8_t *crev,
565                             uint8_t **addr)
566 {
567         struct amdgpu_device *adev = smu->adev;
568         uint16_t data_start;
569
570         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
571                                            size, frev, crev, &data_start))
572                 return -EINVAL;
573
574         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
575
576         return 0;
577 }
578
579 static int smu_initialize_pptable(struct smu_context *smu)
580 {
581         /* TODO */
582         return 0;
583 }
584
585 static int smu_smc_table_sw_init(struct smu_context *smu)
586 {
587         int ret;
588
589         ret = smu_initialize_pptable(smu);
590         if (ret) {
591                 pr_err("smu_initialize_pptable failed!\n");
592                 return ret;
593         }
594
595         /**
596          * Create smu_table structure, and init smc tables such as
597          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
598          */
599         ret = smu_init_smc_tables(smu);
600         if (ret) {
601                 pr_err("Failed to init smc tables!\n");
602                 return ret;
603         }
604
605         /**
606          * Create smu_power_context structure, and allocate smu_dpm_context and
607          * context data to fill the smu_power_context.
608          */
609         ret = smu_init_power(smu);
610         if (ret) {
611                 pr_err("smu_init_power failed!\n");
612                 return ret;
613         }
614
615         return 0;
616 }
617
618 static int smu_smc_table_sw_fini(struct smu_context *smu)
619 {
620         int ret;
621
622         ret = smu_fini_smc_tables(smu);
623         if (ret) {
624                 pr_err("smu_fini_smc_tables failed!\n");
625                 return ret;
626         }
627
628         return 0;
629 }
630
631 static int smu_sw_init(void *handle)
632 {
633         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
634         struct smu_context *smu = &adev->smu;
635         int ret;
636
637         smu->pool_size = adev->pm.smu_prv_buffer_size;
638         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
639         mutex_init(&smu->smu_feature.mutex);
640         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
641         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
642         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
643
644         mutex_init(&smu->smu_baco.mutex);
645         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
646         smu->smu_baco.platform_support = false;
647
648         smu->watermarks_bitmap = 0;
649         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
650         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
651
652         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
653         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
654         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
655         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
656         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
657         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
658         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
659         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
660
661         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
662         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
663         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
664         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
665         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
666         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
667         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
668         smu->display_config = &adev->pm.pm_display_cfg;
669
670         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
671         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
672         ret = smu_init_microcode(smu);
673         if (ret) {
674                 pr_err("Failed to load smu firmware!\n");
675                 return ret;
676         }
677
678         ret = smu_smc_table_sw_init(smu);
679         if (ret) {
680                 pr_err("Failed to sw init smc table!\n");
681                 return ret;
682         }
683
684         return 0;
685 }
686
687 static int smu_sw_fini(void *handle)
688 {
689         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
690         struct smu_context *smu = &adev->smu;
691         int ret;
692
693         ret = smu_smc_table_sw_fini(smu);
694         if (ret) {
695                 pr_err("Failed to sw fini smc table!\n");
696                 return ret;
697         }
698
699         ret = smu_fini_power(smu);
700         if (ret) {
701                 pr_err("smu_fini_power failed!\n");
702                 return ret;
703         }
704
705         return 0;
706 }
707
708 static int smu_init_fb_allocations(struct smu_context *smu)
709 {
710         struct amdgpu_device *adev = smu->adev;
711         struct smu_table_context *smu_table = &smu->smu_table;
712         struct smu_table *tables = smu_table->tables;
713         uint32_t table_count = smu_table->table_count;
714         uint32_t i = 0;
715         int32_t ret = 0;
716
717         if (table_count <= 0)
718                 return -EINVAL;
719
720         for (i = 0 ; i < table_count; i++) {
721                 if (tables[i].size == 0)
722                         continue;
723                 ret = amdgpu_bo_create_kernel(adev,
724                                               tables[i].size,
725                                               tables[i].align,
726                                               tables[i].domain,
727                                               &tables[i].bo,
728                                               &tables[i].mc_address,
729                                               &tables[i].cpu_addr);
730                 if (ret)
731                         goto failed;
732         }
733
734         return 0;
735 failed:
736         while (i--) {   /* unwind all previously allocated tables */
737                 if (tables[i].size == 0)
738                         continue;
739                 amdgpu_bo_free_kernel(&tables[i].bo,
740                                       &tables[i].mc_address,
741                                       &tables[i].cpu_addr);
742
743         }
744         return ret;
745 }
746
747 static int smu_fini_fb_allocations(struct smu_context *smu)
748 {
749         struct smu_table_context *smu_table = &smu->smu_table;
750         struct smu_table *tables = smu_table->tables;
751         uint32_t table_count = smu_table->table_count;
752         uint32_t i = 0;
753
754         if (table_count == 0 || tables == NULL)
755                 return 0;
756
757         for (i = 0 ; i < table_count; i++) {
758                 if (tables[i].size == 0)
759                         continue;
760                 amdgpu_bo_free_kernel(&tables[i].bo,
761                                       &tables[i].mc_address,
762                                       &tables[i].cpu_addr);
763         }
764
765         return 0;
766 }
767
768 static int smu_override_pcie_parameters(struct smu_context *smu)
769 {
770         struct amdgpu_device *adev = smu->adev;
771         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
772         int ret;
773
774         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
775                 pcie_gen = 3;
776         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
777                 pcie_gen = 2;
778         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
779                 pcie_gen = 1;
780         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
781                 pcie_gen = 0;
782
783         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
784          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
785          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
786          */
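            /*
             * Worked example (illustrative): Gen4 (pcie_gen = 3) at x16
             * (pcie_width = 6) with LCLK DPM1 encodes as
             * (1 << 16) | (3 << 8) | 6 = 0x10306.
             */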
787         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
788                 pcie_width = 6;
789         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
790                 pcie_width = 5;
791         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
792                 pcie_width = 4;
793         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
794                 pcie_width = 3;
795         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
796                 pcie_width = 2;
797         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
798                 pcie_width = 1;
799
800         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
801         ret = smu_send_smc_msg_with_param(smu,
802                                           SMU_MSG_OverridePcieParameters,
803                                           smu_pcie_arg);
804         if (ret)
805                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
806         return ret;
807 }
808
809 static int smu_smc_table_hw_init(struct smu_context *smu,
810                                  bool initialize)
811 {
812         struct amdgpu_device *adev = smu->adev;
813         int ret;
814
815         if (smu_is_dpm_running(smu) && adev->in_suspend) {
816                 pr_info("dpm has been enabled\n");
817                 return 0;
818         }
819
820         ret = smu_init_display_count(smu, 0);
821         if (ret)
822                 return ret;
823
824         if (initialize) {
825                 /* get boot_values from vbios to set revision, gfxclk, etc. */
826                 ret = smu_get_vbios_bootup_values(smu);
827                 if (ret)
828                         return ret;
829
830                 ret = smu_setup_pptable(smu);
831                 if (ret)
832                         return ret;
833
834                 ret = smu_get_clk_info_from_vbios(smu);
835                 if (ret)
836                         return ret;
837
838                 /*
839                  * check if the format_revision in vbios is up to date with the
840                  * pptable header version, and the structure size is not 0.
841                  */
842                 ret = smu_check_pptable(smu);
843                 if (ret)
844                         return ret;
845
846                 /*
847                  * allocate vram bos to store smc table contents.
848                  */
849                 ret = smu_init_fb_allocations(smu);
850                 if (ret)
851                         return ret;
852
853                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
854                  * smu_table_context structure. Then read the smc_dpm_table from vbios
855                  * and fill it into smc_pptable.
856                  * then fill it into smc_pptable.
857                  */
858                 ret = smu_parse_pptable(smu);
859                 if (ret)
860                         return ret;
861
862                 /*
863                  * Send msg GetDriverIfVersion to check if the return value is equal
864                  * to the DRIVER_IF_VERSION in the smc header.
865                  */
866                 ret = smu_check_fw_version(smu);
867                 if (ret)
868                         return ret;
869         }
870
871         /*
872          * Copy pptable bo in the vram to smc with SMU MSGs such as
873          * SetDriverDramAddr and TransferTableDram2Smu.
874          */
875         ret = smu_write_pptable(smu);
876         if (ret)
877                 return ret;
878
879         /* issue RunAfllBtc msg */
880         ret = smu_run_afll_btc(smu);
881         if (ret)
882                 return ret;
883
884         ret = smu_feature_set_allowed_mask(smu);
885         if (ret)
886                 return ret;
887
888         ret = smu_system_features_control(smu, true);
889         if (ret)
890                 return ret;
891
892         ret = smu_override_pcie_parameters(smu);
893         if (ret)
894                 return ret;
895
896         ret = smu_notify_display_change(smu);
897         if (ret)
898                 return ret;
899
900         /*
901          * Set min deep sleep dcefclk with bootup value from vbios via
902          * SetMinDeepSleepDcefclk MSG.
903          */
904         ret = smu_set_min_dcef_deep_sleep(smu);
905         if (ret)
906                 return ret;
907
908         /*
909          * Set initial values (read from vbios) in the dpm tables context, such as
910          * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
911          * type of clock.
912          */
913         if (initialize) {
914                 ret = smu_populate_smc_pptable(smu);
915                 if (ret)
916                         return ret;
917
918                 ret = smu_init_max_sustainable_clocks(smu);
919                 if (ret)
920                         return ret;
921         }
922
923         ret = smu_set_default_od_settings(smu, initialize);
924         if (ret)
925                 return ret;
926
927         if (initialize) {
928                 ret = smu_populate_umd_state_clk(smu);
929                 if (ret)
930                         return ret;
931
932                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
933                 if (ret)
934                         return ret;
935         }
936
937         /*
938          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
939          */
940         ret = smu_set_tool_table_location(smu);
941
942         if (!smu_is_dpm_running(smu))
943                 pr_info("dpm has been disabled\n");
944
945         return ret;
946 }
947
948 /**
949  * smu_alloc_memory_pool - allocate memory pool in the system memory
950  *
951  * @smu: smu_context pointer
952  *
953  * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
954  * DramLogSetDramAddr messages notify the SMC of its location.
955  *
956  * Returns 0 on success, error on failure.
957  */
958 static int smu_alloc_memory_pool(struct smu_context *smu)
959 {
960         struct amdgpu_device *adev = smu->adev;
961         struct smu_table_context *smu_table = &smu->smu_table;
962         struct smu_table *memory_pool = &smu_table->memory_pool;
963         uint64_t pool_size = smu->pool_size;
964         int ret = 0;
965
966         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
967                 return ret;
968
969         memory_pool->size = pool_size;
970         memory_pool->align = PAGE_SIZE;
971         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
972
973         switch (pool_size) {
974         case SMU_MEMORY_POOL_SIZE_256_MB:
975         case SMU_MEMORY_POOL_SIZE_512_MB:
976         case SMU_MEMORY_POOL_SIZE_1_GB:
977         case SMU_MEMORY_POOL_SIZE_2_GB:
978                 ret = amdgpu_bo_create_kernel(adev,
979                                               memory_pool->size,
980                                               memory_pool->align,
981                                               memory_pool->domain,
982                                               &memory_pool->bo,
983                                               &memory_pool->mc_address,
984                                               &memory_pool->cpu_addr);
985                 break;
986         default:
987                 break;
988         }
989
990         return ret;
991 }
992
993 static int smu_free_memory_pool(struct smu_context *smu)
994 {
995         struct smu_table_context *smu_table = &smu->smu_table;
996         struct smu_table *memory_pool = &smu_table->memory_pool;
997         int ret = 0;
998
999         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1000                 return ret;
1001
1002         amdgpu_bo_free_kernel(&memory_pool->bo,
1003                               &memory_pool->mc_address,
1004                               &memory_pool->cpu_addr);
1005
1006         memset(memory_pool, 0, sizeof(struct smu_table));
1007
1008         return ret;
1009 }
1010
1011 static int smu_hw_init(void *handle)
1012 {
1013         int ret;
1014         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015         struct smu_context *smu = &adev->smu;
1016
1017         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1018                 ret = smu_check_fw_status(smu);
1019                 if (ret) {
1020                         pr_err("SMC firmware status is not correct\n");
1021                         return ret;
1022                 }
1023         }
1024
1025         ret = smu_feature_init_dpm(smu);
1026         if (ret)
1027                 goto failed;
1028
1029         ret = smu_smc_table_hw_init(smu, true);
1030         if (ret)
1031                 goto failed;
1032
1033         ret = smu_alloc_memory_pool(smu);
1034         if (ret)
1035                 goto failed;
1036
1037         /*
1038          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1039          * notify the SMC of the pool location.
1040          */
1041         ret = smu_notify_memory_pool_location(smu);
1042         if (ret)
1043                 goto failed;
1044
1045         ret = smu_start_thermal_control(smu);
1046         if (ret)
1047                 goto failed;
1048
1049         ret = smu_register_irq_handler(smu);
1050         if (ret)
1051                 goto failed;
1052
1053         if (!smu->pm_enabled)
1054                 adev->pm.dpm_enabled = false;
1055         else
1056                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag once VCN and DAL DPM are workable */
1057
1058         pr_info("SMU is initialized successfully!\n");
1059
1060         return 0;
1061
1062 failed:
1063         return ret;
1064 }
1065
1066 static int smu_hw_fini(void *handle)
1067 {
1068         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1069         struct smu_context *smu = &adev->smu;
1070         struct smu_table_context *table_context = &smu->smu_table;
1071         int ret = 0;
1072
1073         kfree(table_context->driver_pptable);
1074         table_context->driver_pptable = NULL;
1075
1076         kfree(table_context->max_sustainable_clocks);
1077         table_context->max_sustainable_clocks = NULL;
1078
1079         kfree(table_context->overdrive_table);
1080         table_context->overdrive_table = NULL;
1081
1082         kfree(smu->irq_source);
1083         smu->irq_source = NULL;
1084
1085         ret = smu_fini_fb_allocations(smu);
1086         if (ret)
1087                 return ret;
1088
1089         ret = smu_free_memory_pool(smu);
1090         if (ret)
1091                 return ret;
1092
1093         return 0;
1094 }
1095
1096 int smu_reset(struct smu_context *smu)
1097 {
1098         struct amdgpu_device *adev = smu->adev;
1099         int ret = 0;
1100
1101         ret = smu_hw_fini(adev);
1102         if (ret)
1103                 return ret;
1104
1105         ret = smu_hw_init(adev);
1106         if (ret)
1107                 return ret;
1108
1109         return ret;
1110 }
1111
1112 static int smu_suspend(void *handle)
1113 {
1114         int ret;
1115         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1116         struct smu_context *smu = &adev->smu;
1117         bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1118
1119         ret = smu_system_features_control(smu, false);
1120         if (ret)
1121                 return ret;
1122
1123         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1124                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1125                 if (ret) {
1126                         pr_warn("set BACO feature enabled failed, return %d\n", ret);
1127                         return ret;
1128                 }
1129         }
1130
1131         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1132
1133         if (adev->asic_type >= CHIP_NAVI10 &&
1134             adev->gfx.rlc.funcs->stop)
1135                 adev->gfx.rlc.funcs->stop(adev);
1136
1137         return 0;
1138 }
1139
1140 static int smu_resume(void *handle)
1141 {
1142         int ret;
1143         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1144         struct smu_context *smu = &adev->smu;
1145
1146         pr_info("SMU is resuming...\n");
1147
1148         mutex_lock(&smu->mutex);
1149
1150         ret = smu_smc_table_hw_init(smu, false);
1151         if (ret)
1152                 goto failed;
1153
1154         ret = smu_start_thermal_control(smu);
1155         if (ret)
1156                 goto failed;
1157
1158         mutex_unlock(&smu->mutex);
1159
1160         pr_info("SMU is resumed successfully!\n");
1161
1162         return 0;
1163 failed:
1164         mutex_unlock(&smu->mutex);
1165         return ret;
1166 }
1167
1168 int smu_display_configuration_change(struct smu_context *smu,
1169                                      const struct amd_pp_display_configuration *display_config)
1170 {
1171         int index = 0;
1172         int num_of_active_display = 0;
1173
1174         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1175                 return -EINVAL;
1176
1177         if (!display_config)
1178                 return -EINVAL;
1179
1180         mutex_lock(&smu->mutex);
1181
1182         smu_set_deep_sleep_dcefclk(smu,
1183                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1184
1185         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1186                 if (display_config->displays[index].controller_id != 0)
1187                         num_of_active_display++;
1188         }
1189
1190         smu_set_active_display_count(smu, num_of_active_display);
1191
1192         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1193                            display_config->cpu_cc6_disable,
1194                            display_config->cpu_pstate_disable,
1195                            display_config->nb_pstate_switch_disable);
1196
1197         mutex_unlock(&smu->mutex);
1198
1199         return 0;
1200 }
1201
1202 static int smu_get_clock_info(struct smu_context *smu,
1203                               struct smu_clock_info *clk_info,
1204                               enum smu_perf_level_designation designation)
1205 {
1206         int ret;
1207         struct smu_performance_level level = {0};
1208
1209         if (!clk_info)
1210                 return -EINVAL;
1211
1212         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1213         if (ret)
1214                 return -EINVAL;
1215
1216         clk_info->min_mem_clk = level.memory_clock;
1217         clk_info->min_eng_clk = level.core_clock;
1218         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1219
1220         ret = smu_get_perf_level(smu, designation, &level);
1221         if (ret)
1222                 return -EINVAL;
1223
1224         clk_info->min_mem_clk = level.memory_clock;
1225         clk_info->min_eng_clk = level.core_clock;
1226         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1227
1228         return 0;
1229 }
1230
1231 int smu_get_current_clocks(struct smu_context *smu,
1232                            struct amd_pp_clock_info *clocks)
1233 {
1234         struct amd_pp_simple_clock_info simple_clocks = {0};
1235         struct smu_clock_info hw_clocks;
1236         int ret = 0;
1237
1238         if (!is_support_sw_smu(smu->adev))
1239                 return -EINVAL;
1240
1241         mutex_lock(&smu->mutex);
1242
1243         smu_get_dal_power_level(smu, &simple_clocks);
1244
1245         if (smu->support_power_containment)
1246                 ret = smu_get_clock_info(smu, &hw_clocks,
1247                                          PERF_LEVEL_POWER_CONTAINMENT);
1248         else
1249                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1250
1251         if (ret) {
1252                 pr_err("Error in smu_get_clock_info\n");
1253                 goto failed;
1254         }
1255
1256         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1257         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1258         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1259         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1260         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1261         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1262         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1263         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1264
1265         if (simple_clocks.level == 0)
1266                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1267         else
1268                 clocks->max_clocks_state = simple_clocks.level;
1269
1270         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1271                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1272                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1273         }
1274
1275 failed:
1276         mutex_unlock(&smu->mutex);
1277         return ret;
1278 }
1279
1280 static int smu_set_clockgating_state(void *handle,
1281                                      enum amd_clockgating_state state)
1282 {
1283         return 0;
1284 }
1285
1286 static int smu_set_powergating_state(void *handle,
1287                                      enum amd_powergating_state state)
1288 {
1289         return 0;
1290 }
1291
1292 static int smu_enable_umd_pstate(void *handle,
1293                       enum amd_dpm_forced_level *level)
1294 {
1295         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1296                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1297                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1298                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1299
1300         struct smu_context *smu = (struct smu_context*)(handle);
1301         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1302         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1303                 return -EINVAL;
1304
1305         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1306                 /* enter umd pstate, save current level, disable gfx cg*/
1307                 if (*level & profile_mode_mask) {
1308                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1309                         smu_dpm_ctx->enable_umd_pstate = true;
1310                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1311                                                                AMD_IP_BLOCK_TYPE_GFX,
1312                                                                AMD_CG_STATE_UNGATE);
1313                         amdgpu_device_ip_set_powergating_state(smu->adev,
1314                                                                AMD_IP_BLOCK_TYPE_GFX,
1315                                                                AMD_PG_STATE_UNGATE);
1316                 }
1317         } else {
1318                 /* exit umd pstate, restore level, enable gfx cg*/
1319                 if (!(*level & profile_mode_mask)) {
1320                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1321                                 *level = smu_dpm_ctx->saved_dpm_level;
1322                         smu_dpm_ctx->enable_umd_pstate = false;
1323                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1324                                                                AMD_IP_BLOCK_TYPE_GFX,
1325                                                                AMD_CG_STATE_GATE);
1326                         amdgpu_device_ip_set_powergating_state(smu->adev,
1327                                                                AMD_IP_BLOCK_TYPE_GFX,
1328                                                                AMD_PG_STATE_GATE);
1329                 }
1330         }
1331
1332         return 0;
1333 }
1334
1335 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1336                                    enum amd_dpm_forced_level level,
1337                                    bool skip_display_settings)
1338 {
1339         int ret = 0;
1340         int index = 0;
1341         uint32_t sclk_mask, mclk_mask, soc_mask;
1342         long workload;
1343         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1344
1345         if (!smu->pm_enabled)
1346                 return -EINVAL;
1347         if (!skip_display_settings) {
1348                 ret = smu_display_config_changed(smu);
1349                 if (ret) {
1350                         pr_err("Failed to change display config!");
1351                         return ret;
1352                 }
1353         }
1354
1355         if (!smu->pm_enabled)
1356                 return -EINVAL;
1357         ret = smu_apply_clocks_adjust_rules(smu);
1358         if (ret) {
1359                 pr_err("Failed to apply clocks adjust rules!");
1360                 return ret;
1361         }
1362
1363         if (!skip_display_settings) {
1364                 ret = smu_notify_smc_dispaly_config(smu);
1365                 if (ret) {
1366                         pr_err("Failed to notify smc display config!");
1367                         return ret;
1368                 }
1369         }
1370
1371         if (smu_dpm_ctx->dpm_level != level) {
1372                 switch (level) {
1373                 case AMD_DPM_FORCED_LEVEL_HIGH:
1374                         ret = smu_force_dpm_limit_value(smu, true);
1375                         break;
1376                 case AMD_DPM_FORCED_LEVEL_LOW:
1377                         ret = smu_force_dpm_limit_value(smu, false);
1378                         break;
1379
1380                 case AMD_DPM_FORCED_LEVEL_AUTO:
1381                         ret = smu_unforce_dpm_levels(smu);
1382                         break;
1383
1384                 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1385                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1386                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1387                 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1388                         ret = smu_get_profiling_clk_mask(smu, level,
1389                                                          &sclk_mask,
1390                                                          &mclk_mask,
1391                                                          &soc_mask);
1392                         if (ret)
1393                                 return ret;
1394                         smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
1395                         smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
1396                         smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1397                         break;
1398
1399                 case AMD_DPM_FORCED_LEVEL_MANUAL:
1400                 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1401                 default:
1402                         break;
1403                 }
1404
1405                 if (!ret)
1406                         smu_dpm_ctx->dpm_level = level;
1407         }
1408
1409         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
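                     /*
                      * fls() returns the 1-based position of the highest set bit
                      * (0 when the mask is empty), so index - 1 selects the
                      * highest-priority workload currently requested.
                      */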
1410                 index = fls(smu->workload_mask);
1411                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1412                 workload = smu->workload_setting[index];
1413
1414                 if (smu->power_profile_mode != workload)
1415                         smu_set_power_profile_mode(smu, &workload, 0);
1416         }
1417
1418         return ret;
1419 }
1420
1421 int smu_handle_task(struct smu_context *smu,
1422                     enum amd_dpm_forced_level level,
1423                     enum amd_pp_task task_id)
1424 {
1425         int ret = 0;
1426
1427         switch (task_id) {
1428         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1429                 ret = smu_pre_display_config_changed(smu);
1430                 if (ret)
1431                         return ret;
1432                 ret = smu_set_cpu_power_state(smu);
1433                 if (ret)
1434                         return ret;
1435                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1436                 break;
1437         case AMD_PP_TASK_COMPLETE_INIT:
1438         case AMD_PP_TASK_READJUST_POWER_STATE:
1439                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1440                 break;
1441         default:
1442                 break;
1443         }
1444
1445         return ret;
1446 }
1447
1448 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1449 {
1450         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1451
1452         if (!smu_dpm_ctx->dpm_context)
1453                 return -EINVAL;
1454
1455         mutex_lock(&(smu->mutex));
1456         if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
1457                 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1458         }
1459         mutex_unlock(&(smu->mutex));
1460
1461         return smu_dpm_ctx->dpm_level;
1462 }
1463
1464 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1465 {
1466         int ret = 0;
1467         int i;
1468         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1469
1470         if (!smu_dpm_ctx->dpm_context)
1471                 return -EINVAL;
1472
1473         for (i = 0; i < smu->adev->num_ip_blocks; i++) {
1474                 if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
1475                         break;
1476         }
1477
1478
1479         smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
1480         ret = smu_handle_task(smu, level,
1481                               AMD_PP_TASK_READJUST_POWER_STATE);
1482         if (ret)
1483                 return ret;
1484
1485         mutex_lock(&smu->mutex);
1486         smu_dpm_ctx->dpm_level = level;
1487         mutex_unlock(&smu->mutex);
1488
1489         return ret;
1490 }
1491
1492 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1493 {
1494         int ret = 0;
1495
1496         mutex_lock(&smu->mutex);
1497         ret = smu_init_display_count(smu, count);
1498         mutex_unlock(&smu->mutex);
1499
1500         return ret;
1501 }
1502
1503 const struct amd_ip_funcs smu_ip_funcs = {
1504         .name = "smu",
1505         .early_init = smu_early_init,
1506         .late_init = smu_late_init,
1507         .sw_init = smu_sw_init,
1508         .sw_fini = smu_sw_fini,
1509         .hw_init = smu_hw_init,
1510         .hw_fini = smu_hw_fini,
1511         .suspend = smu_suspend,
1512         .resume = smu_resume,
1513         .is_idle = NULL,
1514         .check_soft_reset = NULL,
1515         .wait_for_idle = NULL,
1516         .soft_reset = NULL,
1517         .set_clockgating_state = smu_set_clockgating_state,
1518         .set_powergating_state = smu_set_powergating_state,
1519         .enable_umd_pstate = smu_enable_umd_pstate,
1520 };
1521
1522 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1523 {
1524         .type = AMD_IP_BLOCK_TYPE_SMC,
1525         .major = 11,
1526         .minor = 0,
1527         .rev = 0,
1528         .funcs = &smu_ip_funcs,
1529 };