]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drm/amd/powerplay: add thermal ctf support for navi10
[linux.git] / drivers / gpu / drm / amd / powerplay / amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "atom.h"
31 #include "amd_pcie.h"
32
33 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
34 {
35         int ret = 0;
36
37         if (!if_version && !smu_version)
38                 return -EINVAL;
39
40         if (if_version) {
41                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
42                 if (ret)
43                         return ret;
44
45                 ret = smu_read_smc_arg(smu, if_version);
46                 if (ret)
47                         return ret;
48         }
49
50         if (smu_version) {
51                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
52                 if (ret)
53                         return ret;
54
55                 ret = smu_read_smc_arg(smu, smu_version);
56                 if (ret)
57                         return ret;
58         }
59
60         return ret;
61 }
62
63 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
64                             uint32_t min, uint32_t max)
65 {
66         int ret = 0, clk_id = 0;
67         uint32_t param;
68
69         if (min <= 0 && max <= 0)
70                 return -EINVAL;
71
72         clk_id = smu_clk_get_index(smu, clk_type);
73         if (clk_id < 0)
74                 return clk_id;
75
76         if (max > 0) {
77                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
78                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
79                                                   param);
80                 if (ret)
81                         return ret;
82         }
83
84         if (min > 0) {
85                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
86                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
87                                                   param);
88                 if (ret)
89                         return ret;
90         }
91
92
93         return ret;
94 }
95
96 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
97                             uint32_t min, uint32_t max)
98 {
99         int ret = 0, clk_id = 0;
100         uint32_t param;
101
102         if (min <= 0 && max <= 0)
103                 return -EINVAL;
104
105         clk_id = smu_clk_get_index(smu, clk_type);
106         if (clk_id < 0)
107                 return clk_id;
108
109         if (max > 0) {
110                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
111                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
112                                                   param);
113                 if (ret)
114                         return ret;
115         }
116
117         if (min > 0) {
118                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
119                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
120                                                   param);
121                 if (ret)
122                         return ret;
123         }
124
125
126         return ret;
127 }
128
129 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
130                            uint32_t *min, uint32_t *max)
131 {
132         int ret = 0, clk_id = 0;
133         uint32_t param = 0;
134
135         if (!min && !max)
136                 return -EINVAL;
137
138         clk_id = smu_clk_get_index(smu, clk_type);
139         if (clk_id < 0)
140                 return clk_id;
141
142         param = (clk_id & 0xffff) << 16;
143
144         if (max) {
145                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
146                 if (ret)
147                         return ret;
148                 ret = smu_read_smc_arg(smu, max);
149                 if (ret)
150                         return ret;
151         }
152
153         if (min) {
154                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
155                 if (ret)
156                         return ret;
157                 ret = smu_read_smc_arg(smu, min);
158                 if (ret)
159                         return ret;
160         }
161
162         return ret;
163 }
164
165 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
166                               uint16_t level, uint32_t *value)
167 {
168         int ret = 0, clk_id = 0;
169         uint32_t param;
170
171         if (!value)
172                 return -EINVAL;
173
174         clk_id = smu_clk_get_index(smu, clk_type);
175         if (clk_id < 0)
176                 return clk_id;
177
178         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
179
180         ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
181                                           param);
182         if (ret)
183                 return ret;
184
185         ret = smu_read_smc_arg(smu, &param);
186         if (ret)
187                 return ret;
188
189         /* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
190          * now, we un-support it */
191         *value = param & 0x7fffffff;
192
193         return ret;
194 }
195
/*
 * Query how many DPM levels a clock domain exposes.  Level index 0xff is
 * the firmware convention for "return the level count instead of a
 * frequency" (see smu_get_dpm_freq_by_index).
 */
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
201
202 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
203                            bool gate)
204 {
205         int ret = 0;
206
207         switch (block_type) {
208         case AMD_IP_BLOCK_TYPE_UVD:
209                 ret = smu_dpm_set_uvd_enable(smu, gate);
210                 break;
211         case AMD_IP_BLOCK_TYPE_VCE:
212                 ret = smu_dpm_set_vce_enable(smu, gate);
213                 break;
214         default:
215                 break;
216         }
217
218         return ret;
219 }
220
/*
 * Power states are not supported by the SW SMU path; always report the
 * default state.
 */
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* not support power state */
	return POWER_STATE_TYPE_DEFAULT;
}
226
227 int smu_get_power_num_states(struct smu_context *smu,
228                              struct pp_states_info *state_info)
229 {
230         if (!state_info)
231                 return -EINVAL;
232
233         /* not support power state */
234         memset(state_info, 0, sizeof(struct pp_states_info));
235         state_info->nums = 0;
236
237         return 0;
238 }
239
240 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
241                            void *data, uint32_t *size)
242 {
243         int ret = 0;
244
245         switch (sensor) {
246         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
247                 *((uint32_t *)data) = smu->pstate_sclk;
248                 *size = 4;
249                 break;
250         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
251                 *((uint32_t *)data) = smu->pstate_mclk;
252                 *size = 4;
253                 break;
254         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
255                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
256                 *size = 8;
257                 break;
258         case AMDGPU_PP_SENSOR_UVD_POWER:
259                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
260                 *size = 4;
261                 break;
262         case AMDGPU_PP_SENSOR_VCE_POWER:
263                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
264                 *size = 4;
265                 break;
266         default:
267                 ret = -EINVAL;
268                 break;
269         }
270
271         if (ret)
272                 *size = 0;
273
274         return ret;
275 }
276
277 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
278                      void *table_data, bool drv2smu)
279 {
280         struct smu_table_context *smu_table = &smu->smu_table;
281         struct smu_table *table = NULL;
282         int ret = 0;
283         int table_id = smu_table_get_index(smu, table_index);
284
285         if (!table_data || table_id >= smu_table->table_count)
286                 return -EINVAL;
287
288         table = &smu_table->tables[table_index];
289
290         if (drv2smu)
291                 memcpy(table->cpu_addr, table_data, table->size);
292
293         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
294                                           upper_32_bits(table->mc_address));
295         if (ret)
296                 return ret;
297         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
298                                           lower_32_bits(table->mc_address));
299         if (ret)
300                 return ret;
301         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
302                                           SMU_MSG_TransferTableDram2Smu :
303                                           SMU_MSG_TransferTableSmu2Dram,
304                                           table_id);
305         if (ret)
306                 return ret;
307
308         if (!drv2smu)
309                 memcpy(table_data, table->cpu_addr, table->size);
310
311         return ret;
312 }
313
314 bool is_support_sw_smu(struct amdgpu_device *adev)
315 {
316         if (adev->asic_type == CHIP_VEGA20)
317                 return (amdgpu_dpm == 2) ? true : false;
318         else if (adev->asic_type >= CHIP_NAVI10)
319                 return true;
320         else
321                 return false;
322 }
323
324 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
325 {
326         struct smu_table_context *smu_table = &smu->smu_table;
327
328         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
329                 return -EINVAL;
330
331         if (smu_table->hardcode_pptable)
332                 *table = smu_table->hardcode_pptable;
333         else
334                 *table = smu_table->power_play_table;
335
336         return smu_table->power_play_table_size;
337 }
338
339 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
340 {
341         struct smu_table_context *smu_table = &smu->smu_table;
342         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
343         int ret = 0;
344
345         if (!smu->pm_enabled)
346                 return -EINVAL;
347         if (header->usStructureSize != size) {
348                 pr_err("pp table size not matched !\n");
349                 return -EIO;
350         }
351
352         mutex_lock(&smu->mutex);
353         if (!smu_table->hardcode_pptable)
354                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
355         if (!smu_table->hardcode_pptable) {
356                 ret = -ENOMEM;
357                 goto failed;
358         }
359
360         memcpy(smu_table->hardcode_pptable, buf, size);
361         smu_table->power_play_table = smu_table->hardcode_pptable;
362         smu_table->power_play_table_size = size;
363         mutex_unlock(&smu->mutex);
364
365         ret = smu_reset(smu);
366         if (ret)
367                 pr_info("smu reset failed, ret = %d\n", ret);
368
369         return ret;
370
371 failed:
372         mutex_unlock(&smu->mutex);
373         return ret;
374 }
375
376 int smu_feature_init_dpm(struct smu_context *smu)
377 {
378         struct smu_feature *feature = &smu->smu_feature;
379         int ret = 0;
380         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
381
382         if (!smu->pm_enabled)
383                 return ret;
384         mutex_lock(&feature->mutex);
385         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
386         mutex_unlock(&feature->mutex);
387
388         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
389                                              SMU_FEATURE_MAX/32);
390         if (ret)
391                 return ret;
392
393         mutex_lock(&feature->mutex);
394         bitmap_or(feature->allowed, feature->allowed,
395                       (unsigned long *)allowed_feature_mask,
396                       feature->feature_num);
397         mutex_unlock(&feature->mutex);
398
399         return ret;
400 }
401
402 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
403 {
404         struct smu_feature *feature = &smu->smu_feature;
405         uint32_t feature_id;
406         int ret = 0;
407
408         feature_id = smu_feature_get_index(smu, mask);
409
410         WARN_ON(feature_id > feature->feature_num);
411
412         mutex_lock(&feature->mutex);
413         ret = test_bit(feature_id, feature->enabled);
414         mutex_unlock(&feature->mutex);
415
416         return ret;
417 }
418
419 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
420                             bool enable)
421 {
422         struct smu_feature *feature = &smu->smu_feature;
423         uint32_t feature_id;
424         int ret = 0;
425
426         feature_id = smu_feature_get_index(smu, mask);
427
428         WARN_ON(feature_id > feature->feature_num);
429
430         mutex_lock(&feature->mutex);
431         ret = smu_feature_update_enable_state(smu, feature_id, enable);
432         if (ret)
433                 goto failed;
434
435         if (enable)
436                 test_and_set_bit(feature_id, feature->enabled);
437         else
438                 test_and_clear_bit(feature_id, feature->enabled);
439
440 failed:
441         mutex_unlock(&feature->mutex);
442
443         return ret;
444 }
445
446 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
447 {
448         struct smu_feature *feature = &smu->smu_feature;
449         uint32_t feature_id;
450         int ret = 0;
451
452         feature_id = smu_feature_get_index(smu, mask);
453
454         WARN_ON(feature_id > feature->feature_num);
455
456         mutex_lock(&feature->mutex);
457         ret = test_bit(feature_id, feature->supported);
458         mutex_unlock(&feature->mutex);
459
460         return ret;
461 }
462
463 int smu_feature_set_supported(struct smu_context *smu,
464                               enum smu_feature_mask mask,
465                               bool enable)
466 {
467         struct smu_feature *feature = &smu->smu_feature;
468         uint32_t feature_id;
469         int ret = 0;
470
471         feature_id = smu_feature_get_index(smu, mask);
472
473         WARN_ON(feature_id > feature->feature_num);
474
475         mutex_lock(&feature->mutex);
476         if (enable)
477                 test_and_set_bit(feature_id, feature->supported);
478         else
479                 test_and_clear_bit(feature_id, feature->supported);
480         mutex_unlock(&feature->mutex);
481
482         return ret;
483 }
484
485 static int smu_set_funcs(struct amdgpu_device *adev)
486 {
487         struct smu_context *smu = &adev->smu;
488
489         switch (adev->asic_type) {
490         case CHIP_VEGA20:
491         case CHIP_NAVI10:
492                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
493                         smu->od_enabled = true;
494                 smu_v11_0_set_smu_funcs(smu);
495                 break;
496         default:
497                 return -EINVAL;
498         }
499
500         return 0;
501 }
502
503 static int smu_early_init(void *handle)
504 {
505         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
506         struct smu_context *smu = &adev->smu;
507
508         smu->adev = adev;
509         smu->pm_enabled = !!amdgpu_dpm;
510         mutex_init(&smu->mutex);
511
512         return smu_set_funcs(adev);
513 }
514
515 static int smu_late_init(void *handle)
516 {
517         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
518         struct smu_context *smu = &adev->smu;
519
520         if (!smu->pm_enabled)
521                 return 0;
522         mutex_lock(&smu->mutex);
523         smu_handle_task(&adev->smu,
524                         smu->smu_dpm.dpm_level,
525                         AMD_PP_TASK_COMPLETE_INIT);
526         mutex_unlock(&smu->mutex);
527
528         return 0;
529 }
530
531 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
532                             uint16_t *size, uint8_t *frev, uint8_t *crev,
533                             uint8_t **addr)
534 {
535         struct amdgpu_device *adev = smu->adev;
536         uint16_t data_start;
537
538         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
539                                            size, frev, crev, &data_start))
540                 return -EINVAL;
541
542         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
543
544         return 0;
545 }
546
/*
 * Placeholder for powerplay-table initialization; currently a no-op
 * (always returns 0) until the implementation lands.
 */
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
552
/*
 * Software-side setup of the SMC table machinery: pptable bookkeeping,
 * smc table structures (PPTABLE, WATERMARKS, SMU_METRICS, ...) and the
 * power context.  Each step logs and aborts on failure.
 */
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/* Create the smu_table structures and the individual smc tables. */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/* Allocate the smu_power_context and its dpm context storage. */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}
585
/*
 * Tear down the software-side SMC table structures created by
 * smu_smc_table_sw_init().
 */
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret = smu_fini_smc_tables(smu);

	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}
598
599 static int smu_sw_init(void *handle)
600 {
601         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
602         struct smu_context *smu = &adev->smu;
603         int ret;
604
605         smu->pool_size = adev->pm.smu_prv_buffer_size;
606         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
607         mutex_init(&smu->smu_feature.mutex);
608         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
609         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
610         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
611         smu->watermarks_bitmap = 0;
612         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
613         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
614
615         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
616         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
617         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
618         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
619         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
620         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
621         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
622         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
623
624         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
625         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
626         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
627         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
628         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
629         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
630         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
631         smu->display_config = &adev->pm.pm_display_cfg;
632
633         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
634         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
635         ret = smu_init_microcode(smu);
636         if (ret) {
637                 pr_err("Failed to load smu firmware!\n");
638                 return ret;
639         }
640
641         ret = smu_smc_table_sw_init(smu);
642         if (ret) {
643                 pr_err("Failed to sw init smc table!\n");
644                 return ret;
645         }
646
647         return 0;
648 }
649
650 static int smu_sw_fini(void *handle)
651 {
652         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
653         struct smu_context *smu = &adev->smu;
654         int ret;
655
656         ret = smu_smc_table_sw_fini(smu);
657         if (ret) {
658                 pr_err("Failed to sw fini smc table!\n");
659                 return ret;
660         }
661
662         ret = smu_fini_power(smu);
663         if (ret) {
664                 pr_err("Failed to init smu_fini_power!\n");
665                 return ret;
666         }
667
668         return 0;
669 }
670
671 static int smu_init_fb_allocations(struct smu_context *smu)
672 {
673         struct amdgpu_device *adev = smu->adev;
674         struct smu_table_context *smu_table = &smu->smu_table;
675         struct smu_table *tables = smu_table->tables;
676         uint32_t table_count = smu_table->table_count;
677         uint32_t i = 0;
678         int32_t ret = 0;
679
680         if (table_count <= 0)
681                 return -EINVAL;
682
683         for (i = 0 ; i < table_count; i++) {
684                 if (tables[i].size == 0)
685                         continue;
686                 ret = amdgpu_bo_create_kernel(adev,
687                                               tables[i].size,
688                                               tables[i].align,
689                                               tables[i].domain,
690                                               &tables[i].bo,
691                                               &tables[i].mc_address,
692                                               &tables[i].cpu_addr);
693                 if (ret)
694                         goto failed;
695         }
696
697         return 0;
698 failed:
699         for (; i > 0; i--) {
700                 if (tables[i].size == 0)
701                         continue;
702                 amdgpu_bo_free_kernel(&tables[i].bo,
703                                       &tables[i].mc_address,
704                                       &tables[i].cpu_addr);
705
706         }
707         return ret;
708 }
709
710 static int smu_fini_fb_allocations(struct smu_context *smu)
711 {
712         struct smu_table_context *smu_table = &smu->smu_table;
713         struct smu_table *tables = smu_table->tables;
714         uint32_t table_count = smu_table->table_count;
715         uint32_t i = 0;
716
717         if (table_count == 0 || tables == NULL)
718                 return 0;
719
720         for (i = 0 ; i < table_count; i++) {
721                 if (tables[i].size == 0)
722                         continue;
723                 amdgpu_bo_free_kernel(&tables[i].bo,
724                                       &tables[i].mc_address,
725                                       &tables[i].cpu_addr);
726         }
727
728         return 0;
729 }
730
731 static int smu_override_pcie_parameters(struct smu_context *smu)
732 {
733         struct amdgpu_device *adev = smu->adev;
734         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
735         int ret;
736
737         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
738                 pcie_gen = 3;
739         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
740                 pcie_gen = 2;
741         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
742                 pcie_gen = 1;
743         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
744                 pcie_gen = 0;
745
746         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
747          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
748          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
749          */
750         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
751                 pcie_width = 6;
752         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
753                 pcie_width = 5;
754         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
755                 pcie_width = 4;
756         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
757                 pcie_width = 3;
758         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
759                 pcie_width = 2;
760         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
761                 pcie_width = 1;
762
763         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
764         ret = smu_send_smc_msg_with_param(smu,
765                                           SMU_MSG_OverridePcieParameters,
766                                           smu_pcie_arg);
767         if (ret)
768                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
769         return ret;
770 }
771
/*
 * smu_smc_table_hw_init - bring up the SMU firmware tables on the hardware.
 * @smu:        the SMU context
 * @initialize: true on first init (parse vbios/pptable, allocate BOs),
 *              false when re-running the hardware sequence only (e.g. on
 *              resume, where the software state already exists)
 *
 * The call order below is firmware-mandated; do not reorder the steps.
 * Returns 0 on success, negative error code on the first failing step.
 */
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	/* On resume with DPM already running there is nothing to redo. */
	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, and etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		/*
		 * check if the format_revision in vbios is up to pptable header
		 * version, and the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse pptable format and fill PPTable_t smc_pptable to
		 * smu_table_context structure. And read the smc_dpm_table from vbios,
		 * then fill it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value is equal
		 * with DRIVER_IF_VERSION of smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	/* Tell the firmware which features the driver permits. */
	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	/* Enable all allowed SMU features. */
	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * type of clks.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	/* Overdrive defaults are (re)loaded on both init and re-init. */
	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
906
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: pointer to the SMU context (was wrongly documented as an
 *       amdgpu_device pointer)
 *
 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
 * and DramLogSetDramAddr can notify it changed.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	/* A zero pool size means the pool is disabled; nothing to allocate. */
	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	/* Only the discrete supported sizes get a GTT buffer object. */
	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
951
952 static int smu_free_memory_pool(struct smu_context *smu)
953 {
954         struct smu_table_context *smu_table = &smu->smu_table;
955         struct smu_table *memory_pool = &smu_table->memory_pool;
956         int ret = 0;
957
958         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
959                 return ret;
960
961         amdgpu_bo_free_kernel(&memory_pool->bo,
962                               &memory_pool->mc_address,
963                               &memory_pool->cpu_addr);
964
965         memset(memory_pool, 0, sizeof(struct smu_table));
966
967         return ret;
968 }
969
970 static int smu_hw_init(void *handle)
971 {
972         int ret;
973         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
974         struct smu_context *smu = &adev->smu;
975
976         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
977                 ret = smu_check_fw_status(smu);
978                 if (ret) {
979                         pr_err("SMC firmware status is not correct\n");
980                         return ret;
981                 }
982         }
983
984         mutex_lock(&smu->mutex);
985
986         ret = smu_feature_init_dpm(smu);
987         if (ret)
988                 goto failed;
989
990         ret = smu_smc_table_hw_init(smu, true);
991         if (ret)
992                 goto failed;
993
994         ret = smu_alloc_memory_pool(smu);
995         if (ret)
996                 goto failed;
997
998         /*
999          * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
1000          * pool location.
1001          */
1002         ret = smu_notify_memory_pool_location(smu);
1003         if (ret)
1004                 goto failed;
1005
1006         ret = smu_start_thermal_control(smu);
1007         if (ret)
1008                 goto failed;
1009
1010         ret = smu_register_irq_handler(smu);
1011         if (ret)
1012                 goto failed;
1013
1014         mutex_unlock(&smu->mutex);
1015
1016         if (!smu->pm_enabled)
1017                 adev->pm.dpm_enabled = false;
1018         else
1019                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
1020
1021         pr_info("SMU is initialized successfully!\n");
1022
1023         return 0;
1024
1025 failed:
1026         mutex_unlock(&smu->mutex);
1027         return ret;
1028 }
1029
/*
 * smu_hw_fini - amd_ip_funcs .hw_fini hook: release all CPU-side SMU table
 * buffers, the IRQ source, the framebuffer allocations and the memory pool.
 * Each kfree'd pointer is NULLed so a subsequent re-init (see smu_reset)
 * starts from a clean state; kfree(NULL) is a no-op, so this is safe even
 * if a table was never allocated.
 */
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	/* Free VRAM-side table allocations before the GTT memory pool. */
	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
1071
1072 int smu_reset(struct smu_context *smu)
1073 {
1074         struct amdgpu_device *adev = smu->adev;
1075         int ret = 0;
1076
1077         ret = smu_hw_fini(adev);
1078         if (ret)
1079                 return ret;
1080
1081         ret = smu_hw_init(adev);
1082         if (ret)
1083                 return ret;
1084
1085         return ret;
1086 }
1087
1088 static int smu_suspend(void *handle)
1089 {
1090         int ret;
1091         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1092         struct smu_context *smu = &adev->smu;
1093
1094         ret = smu_system_features_control(smu, false);
1095         if (ret)
1096                 return ret;
1097
1098         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1099
1100         if (adev->asic_type >= CHIP_NAVI10 &&
1101             adev->gfx.rlc.funcs->stop)
1102                 adev->gfx.rlc.funcs->stop(adev);
1103
1104         return 0;
1105 }
1106
1107 static int smu_resume(void *handle)
1108 {
1109         int ret;
1110         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1111         struct smu_context *smu = &adev->smu;
1112
1113         pr_info("SMU is resuming...\n");
1114
1115         mutex_lock(&smu->mutex);
1116
1117         ret = smu_smc_table_hw_init(smu, false);
1118         if (ret)
1119                 goto failed;
1120
1121         ret = smu_start_thermal_control(smu);
1122         if (ret)
1123                 goto failed;
1124
1125         mutex_unlock(&smu->mutex);
1126
1127         pr_info("SMU is resumed successfully!\n");
1128
1129         return 0;
1130 failed:
1131         mutex_unlock(&smu->mutex);
1132         return ret;
1133 }
1134
1135 int smu_display_configuration_change(struct smu_context *smu,
1136                                      const struct amd_pp_display_configuration *display_config)
1137 {
1138         int index = 0;
1139         int num_of_active_display = 0;
1140
1141         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1142                 return -EINVAL;
1143
1144         if (!display_config)
1145                 return -EINVAL;
1146
1147         mutex_lock(&smu->mutex);
1148
1149         smu_set_deep_sleep_dcefclk(smu,
1150                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1151
1152         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1153                 if (display_config->displays[index].controller_id != 0)
1154                         num_of_active_display++;
1155         }
1156
1157         smu_set_active_display_count(smu, num_of_active_display);
1158
1159         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1160                            display_config->cpu_cc6_disable,
1161                            display_config->cpu_pstate_disable,
1162                            display_config->nb_pstate_switch_disable);
1163
1164         mutex_unlock(&smu->mutex);
1165
1166         return 0;
1167 }
1168
1169 static int smu_get_clock_info(struct smu_context *smu,
1170                               struct smu_clock_info *clk_info,
1171                               enum smu_perf_level_designation designation)
1172 {
1173         int ret;
1174         struct smu_performance_level level = {0};
1175
1176         if (!clk_info)
1177                 return -EINVAL;
1178
1179         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1180         if (ret)
1181                 return -EINVAL;
1182
1183         clk_info->min_mem_clk = level.memory_clock;
1184         clk_info->min_eng_clk = level.core_clock;
1185         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1186
1187         ret = smu_get_perf_level(smu, designation, &level);
1188         if (ret)
1189                 return -EINVAL;
1190
1191         clk_info->min_mem_clk = level.memory_clock;
1192         clk_info->min_eng_clk = level.core_clock;
1193         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1194
1195         return 0;
1196 }
1197
1198 int smu_get_current_clocks(struct smu_context *smu,
1199                            struct amd_pp_clock_info *clocks)
1200 {
1201         struct amd_pp_simple_clock_info simple_clocks = {0};
1202         struct smu_clock_info hw_clocks;
1203         int ret = 0;
1204
1205         if (!is_support_sw_smu(smu->adev))
1206                 return -EINVAL;
1207
1208         mutex_lock(&smu->mutex);
1209
1210         smu_get_dal_power_level(smu, &simple_clocks);
1211
1212         if (smu->support_power_containment)
1213                 ret = smu_get_clock_info(smu, &hw_clocks,
1214                                          PERF_LEVEL_POWER_CONTAINMENT);
1215         else
1216                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1217
1218         if (ret) {
1219                 pr_err("Error in smu_get_clock_info\n");
1220                 goto failed;
1221         }
1222
1223         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1224         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1225         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1226         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1227         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1228         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1229         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1230         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1231
1232         if (simple_clocks.level == 0)
1233                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1234         else
1235                 clocks->max_clocks_state = simple_clocks.level;
1236
1237         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1238                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1239                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1240         }
1241
1242 failed:
1243         mutex_unlock(&smu->mutex);
1244         return ret;
1245 }
1246
/* Stub: the amd_ip_funcs interface requires this hook, but the SMU block
 * performs no work on clockgating state changes.
 */
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}
1252
/* Stub: the amd_ip_funcs interface requires this hook, but the SMU block
 * performs no work on powergating state changes.
 */
static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
1258
1259 static int smu_enable_umd_pstate(void *handle,
1260                       enum amd_dpm_forced_level *level)
1261 {
1262         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1263                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1264                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1265                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1266
1267         struct smu_context *smu = (struct smu_context*)(handle);
1268         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1269         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1270                 return -EINVAL;
1271
1272         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1273                 /* enter umd pstate, save current level, disable gfx cg*/
1274                 if (*level & profile_mode_mask) {
1275                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1276                         smu_dpm_ctx->enable_umd_pstate = true;
1277                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1278                                                                AMD_IP_BLOCK_TYPE_GFX,
1279                                                                AMD_CG_STATE_UNGATE);
1280                         amdgpu_device_ip_set_powergating_state(smu->adev,
1281                                                                AMD_IP_BLOCK_TYPE_GFX,
1282                                                                AMD_PG_STATE_UNGATE);
1283                 }
1284         } else {
1285                 /* exit umd pstate, restore level, enable gfx cg*/
1286                 if (!(*level & profile_mode_mask)) {
1287                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1288                                 *level = smu_dpm_ctx->saved_dpm_level;
1289                         smu_dpm_ctx->enable_umd_pstate = false;
1290                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1291                                                                AMD_IP_BLOCK_TYPE_GFX,
1292                                                                AMD_CG_STATE_GATE);
1293                         amdgpu_device_ip_set_powergating_state(smu->adev,
1294                                                                AMD_IP_BLOCK_TYPE_GFX,
1295                                                                AMD_PG_STATE_GATE);
1296                 }
1297         }
1298
1299         return 0;
1300 }
1301
1302 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1303                                    enum amd_dpm_forced_level level,
1304                                    bool skip_display_settings)
1305 {
1306         int ret = 0;
1307         int index = 0;
1308         uint32_t sclk_mask, mclk_mask, soc_mask;
1309         long workload;
1310         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1311
1312         if (!smu->pm_enabled)
1313                 return -EINVAL;
1314         if (!skip_display_settings) {
1315                 ret = smu_display_config_changed(smu);
1316                 if (ret) {
1317                         pr_err("Failed to change display config!");
1318                         return ret;
1319                 }
1320         }
1321
1322         if (!smu->pm_enabled)
1323                 return -EINVAL;
1324         ret = smu_apply_clocks_adjust_rules(smu);
1325         if (ret) {
1326                 pr_err("Failed to apply clocks adjust rules!");
1327                 return ret;
1328         }
1329
1330         if (!skip_display_settings) {
1331                 ret = smu_notify_smc_dispaly_config(smu);
1332                 if (ret) {
1333                         pr_err("Failed to notify smc display config!");
1334                         return ret;
1335                 }
1336         }
1337
1338         if (smu_dpm_ctx->dpm_level != level) {
1339                 switch (level) {
1340                 case AMD_DPM_FORCED_LEVEL_HIGH:
1341                         ret = smu_force_dpm_limit_value(smu, true);
1342                         break;
1343                 case AMD_DPM_FORCED_LEVEL_LOW:
1344                         ret = smu_force_dpm_limit_value(smu, false);
1345                         break;
1346
1347                 case AMD_DPM_FORCED_LEVEL_AUTO:
1348                         ret = smu_unforce_dpm_levels(smu);
1349                         break;
1350
1351                 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1352                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1353                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1354                 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1355                         ret = smu_get_profiling_clk_mask(smu, level,
1356                                                          &sclk_mask,
1357                                                          &mclk_mask,
1358                                                          &soc_mask);
1359                         if (ret)
1360                                 return ret;
1361                         smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1362                         smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1363                         break;
1364
1365                 case AMD_DPM_FORCED_LEVEL_MANUAL:
1366                 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1367                 default:
1368                         break;
1369                 }
1370
1371                 if (!ret)
1372                         smu_dpm_ctx->dpm_level = level;
1373         }
1374
1375         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1376                 index = fls(smu->workload_mask);
1377                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1378                 workload = smu->workload_setting[index];
1379
1380                 if (smu->power_profile_mode != workload)
1381                         smu_set_power_profile_mode(smu, &workload, 0);
1382         }
1383
1384         return ret;
1385 }
1386
1387 int smu_handle_task(struct smu_context *smu,
1388                     enum amd_dpm_forced_level level,
1389                     enum amd_pp_task task_id)
1390 {
1391         int ret = 0;
1392
1393         switch (task_id) {
1394         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1395                 ret = smu_pre_display_config_changed(smu);
1396                 if (ret)
1397                         return ret;
1398                 ret = smu_set_cpu_power_state(smu);
1399                 if (ret)
1400                         return ret;
1401                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1402                 break;
1403         case AMD_PP_TASK_COMPLETE_INIT:
1404         case AMD_PP_TASK_READJUST_POWER_STATE:
1405                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1406                 break;
1407         default:
1408                 break;
1409         }
1410
1411         return ret;
1412 }
1413
1414 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1415 {
1416         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1417
1418         if (!smu_dpm_ctx->dpm_context)
1419                 return -EINVAL;
1420
1421         mutex_lock(&(smu->mutex));
1422         if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
1423                 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1424         }
1425         mutex_unlock(&(smu->mutex));
1426
1427         return smu_dpm_ctx->dpm_level;
1428 }
1429
1430 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1431 {
1432         int ret = 0;
1433         int i;
1434         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1435
1436         if (!smu_dpm_ctx->dpm_context)
1437                 return -EINVAL;
1438
1439         for (i = 0; i < smu->adev->num_ip_blocks; i++) {
1440                 if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
1441                         break;
1442         }
1443
1444         mutex_lock(&smu->mutex);
1445
1446         smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
1447         ret = smu_handle_task(smu, level,
1448                               AMD_PP_TASK_READJUST_POWER_STATE);
1449
1450         mutex_unlock(&smu->mutex);
1451
1452         return ret;
1453 }
1454
/* IP-block callback table registered with the amdgpu device layer; the
 * idle/reset hooks are intentionally unimplemented for the SMU block.
 */
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
1473
/* SMU v11.0 IP block descriptor, added to the device's IP block list
 * during ASIC setup for the SMC block type.
 */
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};