/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

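/**
 * smu_get_smc_version - query the SMC interface and firmware versions
 *
 * @smu:         smu context
 * @if_version:  driver interface version, may be NULL if not wanted
 * @smu_version: SMU firmware version, may be NULL if not wanted
 *
 * At least one of @if_version and @smu_version must be non-NULL.
 * Returns 0 on success, negative error code on failure.
 */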
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, if_version);
                if (ret)
                        return ret;
        }

        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, smu_version);
                if (ret)
                        return ret;
        }

        return ret;
}

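/*
 * Set a soft min/max frequency window for @clk_type. The message
 * parameter packs the clock id into bits 31:16 and the frequency
 * (truncated to 16 bits) into bits 15:0; passing 0 for @min or @max
 * leaves that bound untouched.
 */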
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (min <= 0 && max <= 0)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        return ret;
}

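/*
 * Hard limits use the same parameter encoding as the soft limits above,
 * but via the SetHardMin/MaxByFreq messages, so DPM may not leave the
 * requested window at all. Illustrative call (the clock type and the
 * min_freq/max_freq values are only an example):
 *
 *	ret = smu_set_hard_freq_range(smu, SMU_GFXCLK, min_freq, max_freq);
 */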
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (min <= 0 && max <= 0)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        return ret;
}

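/*
 * Query the min/max frequencies the DPM algorithm may use for
 * @clk_type. Either @min or @max may be NULL if only one bound is
 * needed. Here the clock id alone forms the message parameter
 * (bits 31:16).
 */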
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max)
{
        int ret = 0, clk_id = 0;
        uint32_t param = 0;

        if (!min && !max)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (clk_id & 0xffff) << 16;

        if (max) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
                if (ret)
                        return ret;
                ret = smu_read_smc_arg(smu, max);
                if (ret)
                        return ret;
        }

        if (min) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
                if (ret)
                        return ret;
                ret = smu_read_smc_arg(smu, min);
                if (ret)
                        return ret;
        }

        return ret;
}

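/*
 * Look up the frequency of a single DPM level. @level selects the
 * entry in the clock's DPM table; the clock id goes into bits 31:16
 * of the parameter and the level index into bits 15:0.
 */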
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
                              uint16_t level, uint32_t *value)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (!value)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
                                          param);
        if (ret)
                return ret;

        ret = smu_read_smc_arg(smu, &param);
        if (ret)
                return ret;

        /* BIT31: 0 - fine grained DPM, 1 - discrete DPM
         * the flag is not handled yet, so mask it off
         */
        *value = param & 0x7fffffff;

        return ret;
}

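/*
 * Level index 0xff asks the SMC to return the number of DPM levels for
 * the clock rather than the frequency of a particular level.
 */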
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *value)
{
        return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
{
        int ret = 0;

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, gate);
                break;
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
        default:
                break;
        }

        return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
        /* power states are not supported yet */
        return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* power states are not supported yet */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 0;

        return 0;
}

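/*
 * Generic sensor readback shared by the swSMU ASICs. @size is set to
 * the number of bytes written to @data (or 0 on failure); callers are
 * expected to provide a buffer large enough for the requested sensor.
 */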
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        int ret = 0;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = smu->pstate_mclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                *size = 0;

        return ret;
}

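/**
 * smu_update_table - copy an smc table between driver memory and the SMC
 *
 * @smu:         smu context
 * @table_index: generic table id, translated via smu_table_get_index()
 * @table_data:  driver-side buffer of at least the table's size
 * @drv2smu:     true to push @table_data to the SMC, false to pull the
 *               table from the SMC into @table_data
 *
 * The table is staged in a VRAM bo whose GPU address is handed to the
 * SMC with SetDriverDramAddrHigh/Low before the transfer message is
 * issued.
 */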
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
                     void *table_data, bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
        int table_id = smu_table_get_index(smu, table_index);

        if (!table_data || table_id < 0 || table_id >= smu_table->table_count)
                return -EINVAL;

        table = &smu_table->tables[table_id];

        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
                                          upper_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
                                          lower_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id);
        if (ret)
                return ret;

        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);

        return ret;
}

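/*
 * The swSMU path is always used on Navi10 and newer; on Vega20 it is
 * opt-in via the amdgpu_dpm module parameter (amdgpu.dpm=2).
 */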
bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA20)
                return amdgpu_dpm == 2;
        else if (adev->asic_type >= CHIP_NAVI10)
                return true;
        else
                return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled)
                return -EINVAL;

        if (header->usStructureSize != size) {
                pr_err("pp table size not matched!\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;
        mutex_unlock(&smu->mutex);

        ret = smu_reset(smu);
        if (ret)
                pr_info("smu reset failed, ret = %d\n", ret);

        return ret;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

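/*
 * Rebuild the "allowed" feature bitmap from the ASIC-specific allowed
 * feature mask. The bitmap is cleared first so a stale mask from a
 * previous init cannot leak through.
 */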
int smu_feature_init_dpm(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        if (!smu->pm_enabled)
                return ret;

        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
                            bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = smu_feature_update_enable_state(smu, feature_id, enable);
        if (ret)
                goto failed;

        if (enable)
                test_and_set_bit(feature_id, feature->enabled);
        else
                test_and_clear_bit(feature_id, feature->enabled);

failed:
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
                              enum smu_feature_mask mask,
                              bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        if (enable)
                test_and_set_bit(feature_id, feature->supported);
        else
                test_and_clear_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
        case CHIP_NAVI10:
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        mutex_init(&smu->mutex);

        return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (!smu->pm_enabled)
                return 0;

        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT);
        mutex_unlock(&smu->mutex);

        return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                            uint16_t *size, uint8_t *frev, uint8_t *crev,
                            uint8_t **addr)
{
        struct amdgpu_device *adev = smu->adev;
        uint16_t data_start;

        if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
                                           size, frev, crev, &data_start))
                return -EINVAL;

        *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

        return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
        /* TODO */
        return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        ret = smu_initialize_pptable(smu);
        if (ret) {
                pr_err("Failed to initialize the pptable!\n");
                return ret;
        }

        /*
         * Create the smu_table structure and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                pr_err("Failed to init smc tables!\n");
                return ret;
        }

        /*
         * Create the smu_power_context structure and allocate the
         * smu_dpm_context within it.
         */
        ret = smu_init_power(smu);
        if (ret) {
                pr_err("smu_init_power failed!\n");
                return ret;
        }

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                pr_err("smu_fini_smc_tables failed!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        /* set the priorities before deriving the bootup workload mask from them */
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

        ret = smu_init_microcode(smu);
        if (ret) {
                pr_err("Failed to load smu firmware!\n");
                return ret;
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                pr_err("Failed to sw init smc table!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
                return ret;
        }

        ret = smu_fini_power(smu);
        if (ret) {
                pr_err("smu_fini_power failed!\n");
                return ret;
        }

        return 0;
}

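/*
 * Allocate one bo (in the domain each table requests) for every smc
 * table with a non-zero size; on any failure, free whatever was
 * already allocated.
 */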
static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;
        int ret = 0;

        if (table_count <= 0)
                return -EINVAL;

        for (i = 0; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[i].size,
                                              tables[i].align,
                                              tables[i].domain,
                                              &tables[i].bo,
                                              &tables[i].mc_address,
                                              &tables[i].cpu_addr);
                if (ret)
                        goto failed;
        }

        return 0;
failed:
        /* entry i failed to allocate; free entries [0, i) */
        while (i-- > 0) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }
        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;

        if (table_count == 0 || tables == NULL)
                return 0;

        for (i = 0; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }

        return 0;
}

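/*
 * Clamp the SMU's PCIe DPM to the link speed/width the platform
 * actually supports, as advertised in adev->pm.pcie_gen_mask and
 * pcie_mlw_mask. The encoding of smu_pcie_arg is described below.
 */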
static int smu_override_pcie_parameters(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
        int ret;

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg);
        if (ret)
                pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
        return ret;
}

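/*
 * Bring the SMC tables up on the hardware. @initialize is true on the
 * first init (hw_init) and false on resume, where the software-side
 * state set up earlier is reused.
 */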
static int smu_smc_table_hw_init(struct smu_context *smu,
                                 bool initialize)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        if (smu_is_dpm_running(smu) && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }

        ret = smu_init_display(smu);
        if (ret)
                return ret;

        if (initialize) {
                /* get boot values (revision, gfxclk, etc.) from vbios */
                ret = smu_get_vbios_bootup_values(smu);
                if (ret)
                        return ret;

                ret = smu_setup_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Check that the format_revision in vbios is not older than
                 * the pptable header version, and that the structure size is
                 * not 0.
                 */
                ret = smu_check_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * allocate vram bos to store smc table contents.
                 */
                ret = smu_init_fb_allocations(smu);
                if (ret)
                        return ret;

                /*
                 * Parse the pptable format and fill the PPTable_t smc_pptable
                 * in the smu_table_context structure. Then read the
                 * smc_dpm_table from vbios and fill it into smc_pptable too.
                 */
                ret = smu_parse_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Send the GetDriverIfVersion message to check that the
                 * return value matches the DRIVER_IF_VERSION in the smc
                 * header.
                 */
                ret = smu_check_fw_version(smu);
                if (ret)
                        return ret;
        }

        /*
         * Copy the pptable from its vram bo to the smc with SMU messages
         * such as SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret)
                return ret;

        /* issue the RunAfllBtc message */
        ret = smu_run_afll_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret)
                return ret;

        ret = smu_system_features_control(smu, true);
        if (ret)
                return ret;

        ret = smu_override_pcie_parameters(smu);
        if (ret)
                return ret;

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set the min deep sleep dcef clock to the bootup value from vbios
         * via the SetMinDeepSleepDcefclk message.
         */
        ret = smu_set_min_dcef_deep_sleep(smu);
        if (ret)
                return ret;

        /*
         * Seed the dpm tables context with the initial values read from
         * vbios (gfxclk, memclk, dcefclk, etc.) and enable the DPM feature
         * for each type of clock.
         */
        if (initialize) {
                ret = smu_populate_smc_pptable(smu);
                if (ret)
                        return ret;

                ret = smu_init_max_sustainable_clocks(smu);
                if (ret)
                        return ret;
        }

        ret = smu_set_od8_default_settings(smu, initialize);
        if (ret)
                return ret;

        if (initialize) {
                ret = smu_populate_umd_state_clk(smu);
                if (ret)
                        return ret;

                ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
                if (ret)
                        return ret;
        }

        /*
         * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
         * message for tools.
         */
        ret = smu_set_tool_table_location(smu);

        if (!smu_is_dpm_running(smu))
                pr_info("dpm has been disabled\n");

        return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu context
 *
 * This memory pool is reserved for SMC use; its location is handed to
 * the SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr
 * messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                break;
        default:
                break;
        }

        return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        int ret = 0;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return ret;
}

static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                ret = smu_check_fw_status(smu);
                if (ret) {
                        pr_err("SMC firmware status is not correct\n");
                        return ret;
                }
        }

        mutex_lock(&smu->mutex);

        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;

        ret = smu_smc_table_hw_init(smu, true);
        if (ret)
                goto failed;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                goto failed;

        /*
         * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
         * to notify the SMC of the pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        mutex_unlock(&smu->mutex);

        /* TODO: only set dpm_enabled once VCN and DAL DPM support is in place */
        adev->pm.dpm_enabled = smu->pm_enabled;

        pr_info("SMU is initialized successfully!\n");

        return 0;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;

        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;

        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;

        kfree(table_context->od_feature_capabilities);
        table_context->od_feature_capabilities = NULL;

        kfree(table_context->od_settings_max);
        table_context->od_settings_max = NULL;

        kfree(table_context->od_settings_min);
        table_context->od_settings_min = NULL;

        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;

        kfree(table_context->od8_settings);
        table_context->od8_settings = NULL;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        return 0;
}

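/*
 * Full hw fini/init cycle. Used, for example, after a new pptable has
 * been committed through smu_sys_set_pp_table().
 */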
int smu_reset(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        ret = smu_hw_fini(adev);
        if (ret)
                return ret;

        ret = smu_hw_init(adev);
        if (ret)
                return ret;

        return ret;
}

static int smu_suspend(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        ret = smu_system_features_control(smu, false);
        if (ret)
                return ret;

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        return 0;
}

static int smu_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        pr_info("SMU is resuming...\n");

        mutex_lock(&smu->mutex);

        ret = smu_smc_table_hw_init(smu, false);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        mutex_unlock(&smu->mutex);

        pr_info("SMU is resumed successfully!\n");

        return 0;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int num_of_active_display = 0;

        if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
                return -EINVAL;

        if (!display_config)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_set_deep_sleep_dcefclk(smu,
                                   display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;
        }

        smu_set_active_display_count(smu, num_of_active_display);

        smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
                           display_config->cpu_cc6_disable,
                           display_config->cpu_pstate_disable,
                           display_config->nb_pstate_switch_disable);

        mutex_unlock(&smu->mutex);

        return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
                              struct smu_clock_info *clk_info,
                              enum smu_perf_level_designation designation)
{
        int ret;
        struct smu_performance_level level = {0};

        if (!clk_info)
                return -EINVAL;

        ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        ret = smu_get_perf_level(smu, designation, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks = {0};
        struct smu_clock_info hw_clocks = {0};
        int ret = 0;

        if (!is_support_sw_smu(smu->adev))
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_get_dal_power_level(smu, &simple_clocks);

        if (smu->support_power_containment)
                ret = smu_get_clock_info(smu, &hw_clocks,
                                         PERF_LEVEL_POWER_CONTAINMENT);
        else
                ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

        if (ret) {
                pr_err("Error in smu_get_clock_info\n");
                goto failed;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
        else
                clocks->max_clocks_state = simple_clocks.level;

        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}

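/*
 * Enter or leave the stable pstates used for UMD profiling. On entry
 * the current DPM level is saved and gfx clock/power gating are
 * disabled so measurements are not disturbed; on exit the saved level
 * is restored and gating is re-enabled.
 */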
static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        struct smu_context *smu = (struct smu_context *)handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }

        return 0;
}

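/*
 * Re-evaluate clocks and workload after a display config change or a
 * DPM level change: apply the clock adjust rules, force/unforce levels
 * as requested, and finally pick the highest-priority workload setting
 * from workload_mask.
 */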
int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        uint32_t sclk_mask, mclk_mask, soc_mask;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled)
                return -EINVAL;

        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        pr_err("Failed to change display config!\n");
                        return ret;
                }
        }

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!\n");
                return ret;
        }

        if (!skip_display_settings) {
                /* sic: identifier spelling matches its declaration in amdgpu_smu.h */
                ret = smu_notify_smc_dispaly_config(smu);
                if (ret) {
                        pr_err("Failed to notify smc display config!\n");
                        return ret;
                }
        }

        if (smu_dpm_ctx->dpm_level != level) {
                switch (level) {
                case AMD_DPM_FORCED_LEVEL_HIGH:
                        ret = smu_force_dpm_limit_value(smu, true);
                        break;
                case AMD_DPM_FORCED_LEVEL_LOW:
                        ret = smu_force_dpm_limit_value(smu, false);
                        break;
                case AMD_DPM_FORCED_LEVEL_AUTO:
                        ret = smu_unforce_dpm_levels(smu);
                        break;
                case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                        ret = smu_get_profiling_clk_mask(smu, level,
                                                         &sclk_mask,
                                                         &mclk_mask,
                                                         &soc_mask);
                        if (ret)
                                return ret;
                        smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
                        smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
                        break;
                case AMD_DPM_FORCED_LEVEL_MANUAL:
                case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
                default:
                        break;
                }

                if (!ret)
                        smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0);
        }

        return ret;
}

int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id)
{
        int ret = 0;

        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
                        return ret;
                ret = smu_set_cpu_power_state(smu);
                if (ret)
                        return ret;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);
                break;
        default:
                break;
        }

        return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};