/*
 * drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
 * Commit subject: "drm/amd/powerplay: move power_dpm_force_performance_level
 * to amdgpu_smu file" (source: linux.git)
 */
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "atom.h"
31 #include "amd_pcie.h"
32
33 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
34 {
35         int ret = 0;
36
37         if (!if_version && !smu_version)
38                 return -EINVAL;
39
40         if (if_version) {
41                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
42                 if (ret)
43                         return ret;
44
45                 ret = smu_read_smc_arg(smu, if_version);
46                 if (ret)
47                         return ret;
48         }
49
50         if (smu_version) {
51                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
52                 if (ret)
53                         return ret;
54
55                 ret = smu_read_smc_arg(smu, smu_version);
56                 if (ret)
57                         return ret;
58         }
59
60         return ret;
61 }
62
63 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
64                             uint32_t min, uint32_t max)
65 {
66         int ret = 0, clk_id = 0;
67         uint32_t param;
68
69         if (min <= 0 && max <= 0)
70                 return -EINVAL;
71
72         clk_id = smu_clk_get_index(smu, clk_type);
73         if (clk_id < 0)
74                 return clk_id;
75
76         if (max > 0) {
77                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
78                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
79                                                   param);
80                 if (ret)
81                         return ret;
82         }
83
84         if (min > 0) {
85                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
86                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
87                                                   param);
88                 if (ret)
89                         return ret;
90         }
91
92
93         return ret;
94 }
95
96 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
97                             uint32_t min, uint32_t max)
98 {
99         int ret = 0, clk_id = 0;
100         uint32_t param;
101
102         if (min <= 0 && max <= 0)
103                 return -EINVAL;
104
105         clk_id = smu_clk_get_index(smu, clk_type);
106         if (clk_id < 0)
107                 return clk_id;
108
109         if (max > 0) {
110                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
111                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
112                                                   param);
113                 if (ret)
114                         return ret;
115         }
116
117         if (min > 0) {
118                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
119                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
120                                                   param);
121                 if (ret)
122                         return ret;
123         }
124
125
126         return ret;
127 }
128
/*
 * smu_get_dpm_freq_range - query the min and/or max DPM frequency of a clock.
 *
 * @min and @max may individually be NULL when the caller does not need that
 * bound; at least one must be non-NULL.  Returns 0 on success, or a negative
 * error from the clock-index lookup or the SMC message transaction.
 */
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	if (!min && !max)
		return -EINVAL;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	/* message argument carries the clock id in the high 16 bits */
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			return ret;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			return ret;
	}

	return ret;
}
164
165 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
166                               uint16_t level, uint32_t *value)
167 {
168         int ret = 0, clk_id = 0;
169         uint32_t param;
170
171         if (!value)
172                 return -EINVAL;
173
174         clk_id = smu_clk_get_index(smu, clk_type);
175         if (clk_id < 0)
176                 return clk_id;
177
178         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
179
180         ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
181                                           param);
182         if (ret)
183                 return ret;
184
185         ret = smu_read_smc_arg(smu, &param);
186         if (ret)
187                 return ret;
188
189         /* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
190          * now, we un-support it */
191         *value = param & 0x7fffffff;
192
193         return ret;
194 }
195
/*
 * smu_get_dpm_level_count - get the number of DPM levels for a clock type.
 *
 * Level index 0xff is the magic value the SMC interprets as "return the
 * level count" rather than the frequency of a specific level.
 */
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
201
202 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
203                            bool gate)
204 {
205         int ret = 0;
206
207         switch (block_type) {
208         case AMD_IP_BLOCK_TYPE_UVD:
209                 ret = smu_dpm_set_uvd_enable(smu, gate);
210                 break;
211         case AMD_IP_BLOCK_TYPE_VCE:
212                 ret = smu_dpm_set_vce_enable(smu, gate);
213                 break;
214         default:
215                 break;
216         }
217
218         return ret;
219 }
220
/*
 * smu_get_current_power_state - report the current power state type.
 *
 * Power states are not supported on the SW SMU path, so this always
 * reports the default state.
 */
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* not support power state */
	return POWER_STATE_TYPE_DEFAULT;
}
226
227 int smu_get_power_num_states(struct smu_context *smu,
228                              struct pp_states_info *state_info)
229 {
230         if (!state_info)
231                 return -EINVAL;
232
233         /* not support power state */
234         memset(state_info, 0, sizeof(struct pp_states_info));
235         state_info->nums = 0;
236
237         return 0;
238 }
239
240 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
241                            void *data, uint32_t *size)
242 {
243         int ret = 0;
244
245         switch (sensor) {
246         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
247                 *((uint32_t *)data) = smu->pstate_sclk;
248                 *size = 4;
249                 break;
250         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
251                 *((uint32_t *)data) = smu->pstate_mclk;
252                 *size = 4;
253                 break;
254         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
255                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
256                 *size = 8;
257                 break;
258         case AMDGPU_PP_SENSOR_UVD_POWER:
259                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
260                 *size = 4;
261                 break;
262         case AMDGPU_PP_SENSOR_VCE_POWER:
263                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
264                 *size = 4;
265                 break;
266         default:
267                 ret = -EINVAL;
268                 break;
269         }
270
271         if (ret)
272                 *size = 0;
273
274         return ret;
275 }
276
277 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
278                      void *table_data, bool drv2smu)
279 {
280         struct smu_table_context *smu_table = &smu->smu_table;
281         struct smu_table *table = NULL;
282         int ret = 0;
283         int table_id = smu_table_get_index(smu, table_index);
284
285         if (!table_data || table_id >= smu_table->table_count)
286                 return -EINVAL;
287
288         table = &smu_table->tables[table_index];
289
290         if (drv2smu)
291                 memcpy(table->cpu_addr, table_data, table->size);
292
293         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
294                                           upper_32_bits(table->mc_address));
295         if (ret)
296                 return ret;
297         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
298                                           lower_32_bits(table->mc_address));
299         if (ret)
300                 return ret;
301         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
302                                           SMU_MSG_TransferTableDram2Smu :
303                                           SMU_MSG_TransferTableSmu2Dram,
304                                           table_id);
305         if (ret)
306                 return ret;
307
308         if (!drv2smu)
309                 memcpy(table_data, table->cpu_addr, table->size);
310
311         return ret;
312 }
313
314 bool is_support_sw_smu(struct amdgpu_device *adev)
315 {
316         if (adev->asic_type == CHIP_VEGA20)
317                 return (amdgpu_dpm == 2) ? true : false;
318         else if (adev->asic_type >= CHIP_NAVI10)
319                 return true;
320         else
321                 return false;
322 }
323
324 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
325 {
326         struct smu_table_context *smu_table = &smu->smu_table;
327
328         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
329                 return -EINVAL;
330
331         if (smu_table->hardcode_pptable)
332                 *table = smu_table->hardcode_pptable;
333         else
334                 *table = smu_table->power_play_table;
335
336         return smu_table->power_play_table_size;
337 }
338
339 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
340 {
341         struct smu_table_context *smu_table = &smu->smu_table;
342         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
343         int ret = 0;
344
345         if (!smu->pm_enabled)
346                 return -EINVAL;
347         if (header->usStructureSize != size) {
348                 pr_err("pp table size not matched !\n");
349                 return -EIO;
350         }
351
352         mutex_lock(&smu->mutex);
353         if (!smu_table->hardcode_pptable)
354                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
355         if (!smu_table->hardcode_pptable) {
356                 ret = -ENOMEM;
357                 goto failed;
358         }
359
360         memcpy(smu_table->hardcode_pptable, buf, size);
361         smu_table->power_play_table = smu_table->hardcode_pptable;
362         smu_table->power_play_table_size = size;
363         mutex_unlock(&smu->mutex);
364
365         ret = smu_reset(smu);
366         if (ret)
367                 pr_info("smu reset failed, ret = %d\n", ret);
368
369         return ret;
370
371 failed:
372         mutex_unlock(&smu->mutex);
373         return ret;
374 }
375
/*
 * smu_feature_init_dpm - (re)build the bitmap of DPM features the driver
 * allows the firmware to enable.
 *
 * Clears the current allowed bitmap, queries the ASIC-specific backend for
 * its allowed-feature mask and ORs it in.  No-op (returns 0) when power
 * management is disabled.  The bitmap is only touched under feature->mutex;
 * the backend query runs unlocked.
 */
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	/*
	 * NOTE(review): casting the uint32_t array to unsigned long * assumes
	 * the u32 words lay out like a bitmap on this architecture — confirm
	 * (endianness/word size) or use bitmap_from_arr32().
	 */
	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
401
402 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
403 {
404         struct smu_feature *feature = &smu->smu_feature;
405         uint32_t feature_id;
406         int ret = 0;
407
408         feature_id = smu_feature_get_index(smu, mask);
409
410         WARN_ON(feature_id > feature->feature_num);
411
412         mutex_lock(&feature->mutex);
413         ret = test_bit(feature_id, feature->enabled);
414         mutex_unlock(&feature->mutex);
415
416         return ret;
417 }
418
419 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
420                             bool enable)
421 {
422         struct smu_feature *feature = &smu->smu_feature;
423         uint32_t feature_id;
424         int ret = 0;
425
426         feature_id = smu_feature_get_index(smu, mask);
427
428         WARN_ON(feature_id > feature->feature_num);
429
430         mutex_lock(&feature->mutex);
431         ret = smu_feature_update_enable_state(smu, feature_id, enable);
432         if (ret)
433                 goto failed;
434
435         if (enable)
436                 test_and_set_bit(feature_id, feature->enabled);
437         else
438                 test_and_clear_bit(feature_id, feature->enabled);
439
440 failed:
441         mutex_unlock(&feature->mutex);
442
443         return ret;
444 }
445
446 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
447 {
448         struct smu_feature *feature = &smu->smu_feature;
449         uint32_t feature_id;
450         int ret = 0;
451
452         feature_id = smu_feature_get_index(smu, mask);
453
454         WARN_ON(feature_id > feature->feature_num);
455
456         mutex_lock(&feature->mutex);
457         ret = test_bit(feature_id, feature->supported);
458         mutex_unlock(&feature->mutex);
459
460         return ret;
461 }
462
463 int smu_feature_set_supported(struct smu_context *smu,
464                               enum smu_feature_mask mask,
465                               bool enable)
466 {
467         struct smu_feature *feature = &smu->smu_feature;
468         uint32_t feature_id;
469         int ret = 0;
470
471         feature_id = smu_feature_get_index(smu, mask);
472
473         WARN_ON(feature_id > feature->feature_num);
474
475         mutex_lock(&feature->mutex);
476         if (enable)
477                 test_and_set_bit(feature_id, feature->supported);
478         else
479                 test_and_clear_bit(feature_id, feature->supported);
480         mutex_unlock(&feature->mutex);
481
482         return ret;
483 }
484
/*
 * smu_set_funcs - hook up the ASIC-specific SMU backend.
 *
 * Every currently supported ASIC (Vega20, Navi10) uses the SMU v11
 * implementation.  Overdrive is enabled when the PP_OVERDRIVE_MASK
 * pp_feature bit is set.  Returns -EINVAL for unsupported ASICs.
 */
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
502
503 static int smu_early_init(void *handle)
504 {
505         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
506         struct smu_context *smu = &adev->smu;
507
508         smu->adev = adev;
509         smu->pm_enabled = !!amdgpu_dpm;
510         mutex_init(&smu->mutex);
511
512         return smu_set_funcs(adev);
513 }
514
/*
 * smu_late_init - IP-block late init: run the post-init DPM task.
 *
 * Executes the AMD_PP_TASK_COMPLETE_INIT handler at the current DPM level
 * under smu->mutex.  No-op when power management is disabled.
 * NOTE(review): smu_handle_task()'s return value is not checked —
 * presumably best-effort; confirm before relying on late-init success.
 */
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;
	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}
530
/*
 * smu_get_atom_data_table - locate an ATOM BIOS data table.
 *
 * Parses the vbios data-table header for @table and, on success, fills
 * @size/@frev/@crev and points *@addr at the table's start inside the
 * BIOS image.  Returns -EINVAL when the header cannot be parsed.
 */
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	/* data_start is a byte offset from the beginning of the BIOS image */
	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
546
/* Placeholder for pptable initialization; currently nothing to do. */
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
552
/*
 * smu_smc_table_sw_init - software-side setup of the SMC table machinery.
 *
 * Order matters: the pptable placeholder runs first, then the smc table
 * structures are created, and finally the power context is allocated.
 * Returns the first error encountered.
 */
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}
585
/*
 * smu_smc_table_sw_fini - release the smc table structures created by
 * smu_smc_table_sw_init().
 */
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret = smu_fini_smc_tables(smu);

	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}
598
599 static int smu_sw_init(void *handle)
600 {
601         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
602         struct smu_context *smu = &adev->smu;
603         int ret;
604
605         smu->pool_size = adev->pm.smu_prv_buffer_size;
606         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
607         mutex_init(&smu->smu_feature.mutex);
608         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
609         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
610         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
611         smu->watermarks_bitmap = 0;
612         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
613         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
614
615         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
616         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
617         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
618         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
619         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
620         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
621         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
622         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
623
624         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
625         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
626         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
627         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
628         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
629         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
630         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
631         smu->display_config = &adev->pm.pm_display_cfg;
632
633         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
634         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
635         ret = smu_init_microcode(smu);
636         if (ret) {
637                 pr_err("Failed to load smu firmware!\n");
638                 return ret;
639         }
640
641         ret = smu_smc_table_sw_init(smu);
642         if (ret) {
643                 pr_err("Failed to sw init smc table!\n");
644                 return ret;
645         }
646
647         return 0;
648 }
649
650 static int smu_sw_fini(void *handle)
651 {
652         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
653         struct smu_context *smu = &adev->smu;
654         int ret;
655
656         ret = smu_smc_table_sw_fini(smu);
657         if (ret) {
658                 pr_err("Failed to sw fini smc table!\n");
659                 return ret;
660         }
661
662         ret = smu_fini_power(smu);
663         if (ret) {
664                 pr_err("Failed to init smu_fini_power!\n");
665                 return ret;
666         }
667
668         return 0;
669 }
670
671 static int smu_init_fb_allocations(struct smu_context *smu)
672 {
673         struct amdgpu_device *adev = smu->adev;
674         struct smu_table_context *smu_table = &smu->smu_table;
675         struct smu_table *tables = smu_table->tables;
676         uint32_t table_count = smu_table->table_count;
677         uint32_t i = 0;
678         int32_t ret = 0;
679
680         if (table_count <= 0)
681                 return -EINVAL;
682
683         for (i = 0 ; i < table_count; i++) {
684                 if (tables[i].size == 0)
685                         continue;
686                 ret = amdgpu_bo_create_kernel(adev,
687                                               tables[i].size,
688                                               tables[i].align,
689                                               tables[i].domain,
690                                               &tables[i].bo,
691                                               &tables[i].mc_address,
692                                               &tables[i].cpu_addr);
693                 if (ret)
694                         goto failed;
695         }
696
697         return 0;
698 failed:
699         for (; i > 0; i--) {
700                 if (tables[i].size == 0)
701                         continue;
702                 amdgpu_bo_free_kernel(&tables[i].bo,
703                                       &tables[i].mc_address,
704                                       &tables[i].cpu_addr);
705
706         }
707         return ret;
708 }
709
710 static int smu_fini_fb_allocations(struct smu_context *smu)
711 {
712         struct smu_table_context *smu_table = &smu->smu_table;
713         struct smu_table *tables = smu_table->tables;
714         uint32_t table_count = smu_table->table_count;
715         uint32_t i = 0;
716
717         if (table_count == 0 || tables == NULL)
718                 return 0;
719
720         for (i = 0 ; i < table_count; i++) {
721                 if (tables[i].size == 0)
722                         continue;
723                 amdgpu_bo_free_kernel(&tables[i].bo,
724                                       &tables[i].mc_address,
725                                       &tables[i].cpu_addr);
726         }
727
728         return 0;
729 }
730
/*
 * smu_override_pcie_parameters - tell the SMC the best PCIe gen/width the
 * platform supports.
 *
 * Translates the CAIL link-speed and link-width capability masks into the
 * encoded argument of the OverridePcieParameters message.
 */
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	/* pick the highest supported PCIe generation: 0 = GEN1 ... 3 = GEN4 */
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* LCLK DPM level 1, plus the gen/width selected above */
	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}
771
/*
 * smu_smc_table_hw_init - hardware-side SMC bring-up sequence.
 *
 * @initialize: true on first init (reads vbios boot values, sets up the
 *              pptable and VRAM BOs); false on a re-init path such as
 *              resume, where only the runtime steps are replayed.
 *
 * The statement order below mirrors the firmware's expected handshake
 * sequence and must not be reordered.  Returns the first error, except for
 * smu_set_tool_table_location() whose result is simply passed through at
 * the end.
 */
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	/* on suspend/resume with DPM already running there is nothing to do */
	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, and etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		/*
		 * check if the format_revision in vbios is up to pptable header
		 * version, and the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse pptable format and fill PPTable_t smc_pptable to
		 * smu_table_context structure. And read the smc_dpm_table from vbios,
		 * then fill it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value is equal
		 * with DRIVER_IF_VERSION of smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	/* enable all allowed SMU features */
	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * type of clks.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
906
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
 * and DramLogSetDramAddr can notify it changed.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	/* a zero pool size means the feature is disabled — nothing to allocate */
	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	/* only the discrete, supported pool sizes get a BO; anything else
	 * silently succeeds without allocating */
	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
951
952 static int smu_free_memory_pool(struct smu_context *smu)
953 {
954         struct smu_table_context *smu_table = &smu->smu_table;
955         struct smu_table *memory_pool = &smu_table->memory_pool;
956         int ret = 0;
957
958         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
959                 return ret;
960
961         amdgpu_bo_free_kernel(&memory_pool->bo,
962                               &memory_pool->mc_address,
963                               &memory_pool->cpu_addr);
964
965         memset(memory_pool, 0, sizeof(struct smu_table));
966
967         return ret;
968 }
969
970 static int smu_hw_init(void *handle)
971 {
972         int ret;
973         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
974         struct smu_context *smu = &adev->smu;
975
976         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
977                 ret = smu_check_fw_status(smu);
978                 if (ret) {
979                         pr_err("SMC firmware status is not correct\n");
980                         return ret;
981                 }
982         }
983
984         mutex_lock(&smu->mutex);
985
986         ret = smu_feature_init_dpm(smu);
987         if (ret)
988                 goto failed;
989
990         ret = smu_smc_table_hw_init(smu, true);
991         if (ret)
992                 goto failed;
993
994         ret = smu_alloc_memory_pool(smu);
995         if (ret)
996                 goto failed;
997
998         /*
999          * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
1000          * pool location.
1001          */
1002         ret = smu_notify_memory_pool_location(smu);
1003         if (ret)
1004                 goto failed;
1005
1006         ret = smu_start_thermal_control(smu);
1007         if (ret)
1008                 goto failed;
1009
1010         mutex_unlock(&smu->mutex);
1011
1012         if (!smu->pm_enabled)
1013                 adev->pm.dpm_enabled = false;
1014         else
1015                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
1016
1017         pr_info("SMU is initialized successfully!\n");
1018
1019         return 0;
1020
1021 failed:
1022         mutex_unlock(&smu->mutex);
1023         return ret;
1024 }
1025
1026 static int smu_hw_fini(void *handle)
1027 {
1028         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1029         struct smu_context *smu = &adev->smu;
1030         struct smu_table_context *table_context = &smu->smu_table;
1031         int ret = 0;
1032
1033         kfree(table_context->driver_pptable);
1034         table_context->driver_pptable = NULL;
1035
1036         kfree(table_context->max_sustainable_clocks);
1037         table_context->max_sustainable_clocks = NULL;
1038
1039         kfree(table_context->od_feature_capabilities);
1040         table_context->od_feature_capabilities = NULL;
1041
1042         kfree(table_context->od_settings_max);
1043         table_context->od_settings_max = NULL;
1044
1045         kfree(table_context->od_settings_min);
1046         table_context->od_settings_min = NULL;
1047
1048         kfree(table_context->overdrive_table);
1049         table_context->overdrive_table = NULL;
1050
1051         kfree(table_context->od8_settings);
1052         table_context->od8_settings = NULL;
1053
1054         ret = smu_fini_fb_allocations(smu);
1055         if (ret)
1056                 return ret;
1057
1058         ret = smu_free_memory_pool(smu);
1059         if (ret)
1060                 return ret;
1061
1062         return 0;
1063 }
1064
1065 int smu_reset(struct smu_context *smu)
1066 {
1067         struct amdgpu_device *adev = smu->adev;
1068         int ret = 0;
1069
1070         ret = smu_hw_fini(adev);
1071         if (ret)
1072                 return ret;
1073
1074         ret = smu_hw_init(adev);
1075         if (ret)
1076                 return ret;
1077
1078         return ret;
1079 }
1080
1081 static int smu_suspend(void *handle)
1082 {
1083         int ret;
1084         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1085         struct smu_context *smu = &adev->smu;
1086
1087         ret = smu_system_features_control(smu, false);
1088         if (ret)
1089                 return ret;
1090
1091         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1092
1093         if (adev->asic_type >= CHIP_NAVI10 &&
1094             adev->gfx.rlc.funcs->stop)
1095                 adev->gfx.rlc.funcs->stop(adev);
1096
1097         return 0;
1098 }
1099
1100 static int smu_resume(void *handle)
1101 {
1102         int ret;
1103         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1104         struct smu_context *smu = &adev->smu;
1105
1106         pr_info("SMU is resuming...\n");
1107
1108         mutex_lock(&smu->mutex);
1109
1110         ret = smu_smc_table_hw_init(smu, false);
1111         if (ret)
1112                 goto failed;
1113
1114         ret = smu_start_thermal_control(smu);
1115         if (ret)
1116                 goto failed;
1117
1118         mutex_unlock(&smu->mutex);
1119
1120         pr_info("SMU is resumed successfully!\n");
1121
1122         return 0;
1123 failed:
1124         mutex_unlock(&smu->mutex);
1125         return ret;
1126 }
1127
1128 int smu_display_configuration_change(struct smu_context *smu,
1129                                      const struct amd_pp_display_configuration *display_config)
1130 {
1131         int index = 0;
1132         int num_of_active_display = 0;
1133
1134         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1135                 return -EINVAL;
1136
1137         if (!display_config)
1138                 return -EINVAL;
1139
1140         mutex_lock(&smu->mutex);
1141
1142         smu_set_deep_sleep_dcefclk(smu,
1143                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1144
1145         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1146                 if (display_config->displays[index].controller_id != 0)
1147                         num_of_active_display++;
1148         }
1149
1150         smu_set_active_display_count(smu, num_of_active_display);
1151
1152         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1153                            display_config->cpu_cc6_disable,
1154                            display_config->cpu_pstate_disable,
1155                            display_config->nb_pstate_switch_disable);
1156
1157         mutex_unlock(&smu->mutex);
1158
1159         return 0;
1160 }
1161
/*
 * smu_get_clock_info - fill @clk_info with the clocks of the performance
 * level selected by @designation.
 *
 * NOTE(review): the first smu_get_perf_level(PERF_LEVEL_ACTIVITY) call
 * fills exactly the same three fields that the second call overwrites,
 * so its values are discarded — it only contributes an early -EINVAL on
 * failure. Presumably the first block was meant to fill different (min)
 * fields; confirm against the powerplay original before changing it.
 */
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	/* These assignments overwrite the values stored just above. */
	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
1190
1191 int smu_get_current_clocks(struct smu_context *smu,
1192                            struct amd_pp_clock_info *clocks)
1193 {
1194         struct amd_pp_simple_clock_info simple_clocks = {0};
1195         struct smu_clock_info hw_clocks;
1196         int ret = 0;
1197
1198         if (!is_support_sw_smu(smu->adev))
1199                 return -EINVAL;
1200
1201         mutex_lock(&smu->mutex);
1202
1203         smu_get_dal_power_level(smu, &simple_clocks);
1204
1205         if (smu->support_power_containment)
1206                 ret = smu_get_clock_info(smu, &hw_clocks,
1207                                          PERF_LEVEL_POWER_CONTAINMENT);
1208         else
1209                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1210
1211         if (ret) {
1212                 pr_err("Error in smu_get_clock_info\n");
1213                 goto failed;
1214         }
1215
1216         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1217         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1218         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1219         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1220         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1221         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1222         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1223         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1224
1225         if (simple_clocks.level == 0)
1226                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1227         else
1228                 clocks->max_clocks_state = simple_clocks.level;
1229
1230         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1231                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1232                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1233         }
1234
1235 failed:
1236         mutex_unlock(&smu->mutex);
1237         return ret;
1238 }
1239
/* No-op IP-block callback: the SMU block has no clockgating state to set. */
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}
1245
/* No-op IP-block callback: the SMU block has no powergating state to set. */
static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
1251
1252 static int smu_enable_umd_pstate(void *handle,
1253                       enum amd_dpm_forced_level *level)
1254 {
1255         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1256                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1257                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1258                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1259
1260         struct smu_context *smu = (struct smu_context*)(handle);
1261         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1262         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1263                 return -EINVAL;
1264
1265         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1266                 /* enter umd pstate, save current level, disable gfx cg*/
1267                 if (*level & profile_mode_mask) {
1268                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1269                         smu_dpm_ctx->enable_umd_pstate = true;
1270                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1271                                                                AMD_IP_BLOCK_TYPE_GFX,
1272                                                                AMD_CG_STATE_UNGATE);
1273                         amdgpu_device_ip_set_powergating_state(smu->adev,
1274                                                                AMD_IP_BLOCK_TYPE_GFX,
1275                                                                AMD_PG_STATE_UNGATE);
1276                 }
1277         } else {
1278                 /* exit umd pstate, restore level, enable gfx cg*/
1279                 if (!(*level & profile_mode_mask)) {
1280                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1281                                 *level = smu_dpm_ctx->saved_dpm_level;
1282                         smu_dpm_ctx->enable_umd_pstate = false;
1283                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1284                                                                AMD_IP_BLOCK_TYPE_GFX,
1285                                                                AMD_CG_STATE_GATE);
1286                         amdgpu_device_ip_set_powergating_state(smu->adev,
1287                                                                AMD_IP_BLOCK_TYPE_GFX,
1288                                                                AMD_PG_STATE_GATE);
1289                 }
1290         }
1291
1292         return 0;
1293 }
1294
1295 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1296                                    enum amd_dpm_forced_level level,
1297                                    bool skip_display_settings)
1298 {
1299         int ret = 0;
1300         int index = 0;
1301         uint32_t sclk_mask, mclk_mask, soc_mask;
1302         long workload;
1303         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1304
1305         if (!smu->pm_enabled)
1306                 return -EINVAL;
1307         if (!skip_display_settings) {
1308                 ret = smu_display_config_changed(smu);
1309                 if (ret) {
1310                         pr_err("Failed to change display config!");
1311                         return ret;
1312                 }
1313         }
1314
1315         if (!smu->pm_enabled)
1316                 return -EINVAL;
1317         ret = smu_apply_clocks_adjust_rules(smu);
1318         if (ret) {
1319                 pr_err("Failed to apply clocks adjust rules!");
1320                 return ret;
1321         }
1322
1323         if (!skip_display_settings) {
1324                 ret = smu_notify_smc_dispaly_config(smu);
1325                 if (ret) {
1326                         pr_err("Failed to notify smc display config!");
1327                         return ret;
1328                 }
1329         }
1330
1331         if (smu_dpm_ctx->dpm_level != level) {
1332                 switch (level) {
1333                 case AMD_DPM_FORCED_LEVEL_HIGH:
1334                         ret = smu_force_dpm_limit_value(smu, true);
1335                         break;
1336                 case AMD_DPM_FORCED_LEVEL_LOW:
1337                         ret = smu_force_dpm_limit_value(smu, false);
1338                         break;
1339
1340                 case AMD_DPM_FORCED_LEVEL_AUTO:
1341                         ret = smu_unforce_dpm_levels(smu);
1342                         break;
1343
1344                 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1345                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1346                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1347                 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1348                         ret = smu_get_profiling_clk_mask(smu, level,
1349                                                          &sclk_mask,
1350                                                          &mclk_mask,
1351                                                          &soc_mask);
1352                         if (ret)
1353                                 return ret;
1354                         smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1355                         smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1356                         break;
1357
1358                 case AMD_DPM_FORCED_LEVEL_MANUAL:
1359                 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1360                 default:
1361                         break;
1362                 }
1363
1364                 if (!ret)
1365                         smu_dpm_ctx->dpm_level = level;
1366         }
1367
1368         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1369                 index = fls(smu->workload_mask);
1370                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1371                 workload = smu->workload_setting[index];
1372
1373                 if (smu->power_profile_mode != workload)
1374                         smu_set_power_profile_mode(smu, &workload, 0);
1375         }
1376
1377         return ret;
1378 }
1379
1380 int smu_handle_task(struct smu_context *smu,
1381                     enum amd_dpm_forced_level level,
1382                     enum amd_pp_task task_id)
1383 {
1384         int ret = 0;
1385
1386         switch (task_id) {
1387         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1388                 ret = smu_pre_display_config_changed(smu);
1389                 if (ret)
1390                         return ret;
1391                 ret = smu_set_cpu_power_state(smu);
1392                 if (ret)
1393                         return ret;
1394                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1395                 break;
1396         case AMD_PP_TASK_COMPLETE_INIT:
1397         case AMD_PP_TASK_READJUST_POWER_STATE:
1398                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1399                 break;
1400         default:
1401                 break;
1402         }
1403
1404         return ret;
1405 }
1406
/*
 * smu_get_performance_level - return the current forced performance
 * level, synchronizing saved_dpm_level with dpm_level under smu->mutex.
 *
 * NOTE(review): on a missing dpm_context this returns -EINVAL through an
 * enum return type — callers must treat negative values as errors.
 * The final dpm_level read happens after the mutex is released;
 * presumably benign, but confirm no concurrent writer exists.
 */
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
		smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
	}
	mutex_unlock(&(smu->mutex));

	return smu_dpm_ctx->dpm_level;
}
1422
1423 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1424 {
1425         int ret = 0;
1426         int i;
1427         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1428
1429         if (!smu_dpm_ctx->dpm_context)
1430                 return -EINVAL;
1431
1432         for (i = 0; i < smu->adev->num_ip_blocks; i++) {
1433                 if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
1434                         break;
1435         }
1436
1437         mutex_lock(&smu->mutex);
1438
1439         smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
1440         ret = smu_handle_task(smu, level,
1441                               AMD_PP_TASK_READJUST_POWER_STATE);
1442
1443         mutex_unlock(&smu->mutex);
1444
1445         return ret;
1446 }
1447
/* IP-block callback table for the software SMU; unused hooks stay NULL. */
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
1466
1467 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1468 {
1469         .type = AMD_IP_BLOCK_TYPE_SMC,
1470         .major = 11,
1471         .minor = 0,
1472         .rev = 0,
1473         .funcs = &smu_ip_funcs,
1474 };