drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "soc15_common.h"
30 #include "smu_v11_0.h"
31 #include "smu_v12_0.h"
32 #include "atom.h"
33 #include "amd_pcie.h"
34
35 #undef __SMU_DUMMY_MAP
36 #define __SMU_DUMMY_MAP(type)   #type
37 static const char* __smu_message_names[] = {
38         SMU_MESSAGE_TYPES
39 };
40
41 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
42 {
43         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
44                 return "unknown smu message";
45         return __smu_message_names[type];
46 }
47
48 #undef __SMU_DUMMY_MAP
49 #define __SMU_DUMMY_MAP(fea)    #fea
50 static const char* __smu_feature_names[] = {
51         SMU_FEATURE_MASKS
52 };
53
54 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
55 {
56         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
57                 return "unknown smu feature";
58         return __smu_feature_names[feature];
59 }
60
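/*
 * Fill @buf with the current SMU feature mask plus one line per supported
 * feature showing its enabled/disabled state, sorted by hardware feature
 * index, and return the number of bytes written.
 */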
61 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
62 {
63         size_t size = 0;
64         int ret = 0, i = 0;
65         uint32_t feature_mask[2] = { 0 };
66         int32_t feature_index = 0;
67         uint32_t count = 0;
68         uint32_t sort_feature[SMU_FEATURE_COUNT];
69         uint64_t hw_feature_count = 0;
70
71         mutex_lock(&smu->mutex);
72
73         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
74         if (ret)
75                 goto failed;
76
77         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
78                         feature_mask[1], feature_mask[0]);
79
80         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
81                 feature_index = smu_feature_get_index(smu, i);
82                 if (feature_index < 0)
83                         continue;
84                 sort_feature[feature_index] = i;
85                 hw_feature_count++;
86         }
87
88         for (i = 0; i < hw_feature_count; i++) {
89                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
90                                count++,
91                                smu_get_feature_name(smu, sort_feature[i]),
92                                i,
93                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
94                                "enabled" : "disabled");
95         }
96
97 failed:
98         mutex_unlock(&smu->mutex);
99
100         return size;
101 }
102
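/*
 * Ask the SMC to enable or disable the features in @feature_mask via the
 * (En|Dis)ableSmuFeaturesLow/High messages, then update the cached
 * feature->enabled bitmap to match.
 */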
103 static int smu_feature_update_enable_state(struct smu_context *smu,
104                                            uint64_t feature_mask,
105                                            bool enabled)
106 {
107         struct smu_feature *feature = &smu->smu_feature;
108         uint32_t feature_low = 0, feature_high = 0;
109         int ret = 0;
110
111         if (!smu->pm_enabled)
112                 return ret;
113
114         feature_low = (feature_mask >> 0) & 0xffffffff;
115         feature_high = (feature_mask >> 32) & 0xffffffff;
116
117         if (enabled) {
118                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
119                                                   feature_low);
120                 if (ret)
121                         return ret;
122                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
123                                                   feature_high);
124                 if (ret)
125                         return ret;
126         } else {
127                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
128                                                   feature_low);
129                 if (ret)
130                         return ret;
131                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
132                                                   feature_high);
133                 if (ret)
134                         return ret;
135         }
136
137         mutex_lock(&feature->mutex);
138         if (enabled)
139                 bitmap_or(feature->enabled, feature->enabled,
140                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
141         else
142                 bitmap_andnot(feature->enabled, feature->enabled,
143                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
144         mutex_unlock(&feature->mutex);
145
146         return ret;
147 }
148
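/*
 * Apply a new feature mask requested from sysfs: diff @new_mask against
 * the mask currently enabled in firmware and only toggle the bits that
 * differ.
 */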
149 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
150 {
151         int ret = 0;
152         uint32_t feature_mask[2] = { 0 };
153         uint64_t feature_2_enabled = 0;
154         uint64_t feature_2_disabled = 0;
155         uint64_t feature_enables = 0;
156
157         mutex_lock(&smu->mutex);
158
159         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
160         if (ret)
161                 goto out;
162
163         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
164
165         feature_2_enabled  = ~feature_enables & new_mask;
166         feature_2_disabled = feature_enables & ~new_mask;
167
168         if (feature_2_enabled) {
169                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
170                 if (ret)
171                         goto out;
172         }
173         if (feature_2_disabled) {
174                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
175                 if (ret)
176                         goto out;
177         }
178
179 out:
180         mutex_unlock(&smu->mutex);
181
182         return ret;
183 }
184
185 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
186 {
187         int ret = 0;
188
189         if (!if_version && !smu_version)
190                 return -EINVAL;
191
192         if (if_version) {
193                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
194                 if (ret)
195                         return ret;
196
197                 ret = smu_read_smc_arg(smu, if_version);
198                 if (ret)
199                         return ret;
200         }
201
202         if (smu_version) {
203                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
204                 if (ret)
205                         return ret;
206
207                 ret = smu_read_smc_arg(smu, smu_version);
208                 if (ret)
209                         return ret;
210         }
211
212         return ret;
213 }
214
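/*
 * Set a soft min/max frequency range for @clk_type. Returns 0 without
 * doing anything if DPM is not enabled for that clock.
 */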
215 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
216                             uint32_t min, uint32_t max)
217 {
218         int ret = 0;
219
220         if (min <= 0 && max <= 0)
221                 return -EINVAL;
222
223         if (!smu_clk_dpm_is_enabled(smu, clk_type))
224                 return 0;
225
226         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
227         return ret;
228 }
229
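/*
 * Set hard min/max frequency limits for @clk_type. The message parameter
 * packs the clock index in the upper 16 bits and the frequency (MHz) in
 * the lower 16 bits.
 */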
230 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
231                             uint32_t min, uint32_t max)
232 {
233         int ret = 0, clk_id = 0;
234         uint32_t param;
235
236         if (min <= 0 && max <= 0)
237                 return -EINVAL;
238
239         if (!smu_clk_dpm_is_enabled(smu, clk_type))
240                 return 0;
241
242         clk_id = smu_clk_get_index(smu, clk_type);
243         if (clk_id < 0)
244                 return clk_id;
245
246         if (max > 0) {
247                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
248                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
249                                                   param);
250                 if (ret)
251                         return ret;
252         }
253
254         if (min > 0) {
255                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
256                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
257                                                   param);
258                 if (ret)
259                         return ret;
260         }
261
262
263         return ret;
264 }
265
266 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
267                            uint32_t *min, uint32_t *max, bool lock_needed)
268 {
269         uint32_t clock_limit;
270         int ret = 0;
271
272         if (!min && !max)
273                 return -EINVAL;
274
275         if (lock_needed)
276                 mutex_lock(&smu->mutex);
277
278         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
279                 switch (clk_type) {
280                 case SMU_MCLK:
281                 case SMU_UCLK:
282                         clock_limit = smu->smu_table.boot_values.uclk;
283                         break;
284                 case SMU_GFXCLK:
285                 case SMU_SCLK:
286                         clock_limit = smu->smu_table.boot_values.gfxclk;
287                         break;
288                 case SMU_SOCCLK:
289                         clock_limit = smu->smu_table.boot_values.socclk;
290                         break;
291                 default:
292                         clock_limit = 0;
293                         break;
294                 }
295
296                 /* clock in MHz unit */
297                 if (min)
298                         *min = clock_limit / 100;
299                 if (max)
300                         *max = clock_limit / 100;
301         } else {
302                 /*
303                  * Todo: Have each asic (ASIC_ppt funcs) control the callbacks exposed to the
304                  * core driver, and then have helpers for the stuff that is common (SMU_v11_x | SMU_v12_x funcs).
305                  */
306                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
307         }
308
309         if (lock_needed)
310                 mutex_unlock(&smu->mutex);
311
312         return ret;
313 }
314
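/*
 * Query the frequency of DPM level @level for @clk_type. Bit 31 of the
 * returned value distinguishes fine grained from discrete DPM and is
 * masked off here.
 */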
315 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
316                               uint16_t level, uint32_t *value)
317 {
318         int ret = 0, clk_id = 0;
319         uint32_t param;
320
321         if (!value)
322                 return -EINVAL;
323
324         if (!smu_clk_dpm_is_enabled(smu, clk_type))
325                 return 0;
326
327         clk_id = smu_clk_get_index(smu, clk_type);
328         if (clk_id < 0)
329                 return clk_id;
330
331         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
332
333         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
334                                           param);
335         if (ret)
336                 return ret;
337
338         ret = smu_read_smc_arg(smu, &param);
339         if (ret)
340                 return ret;
341
342         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
343          * not supported for now */
344         *value = param & 0x7fffffff;
345
346         return ret;
347 }
348
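/* Level index 0xff asks the SMC for the number of DPM levels of @clk_type. */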
349 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
350                             uint32_t *value)
351 {
352         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
353 }
354
355 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
356 {
357         enum smu_feature_mask feature_id = 0;
358
359         switch (clk_type) {
360         case SMU_MCLK:
361         case SMU_UCLK:
362                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
363                 break;
364         case SMU_GFXCLK:
365         case SMU_SCLK:
366                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
367                 break;
368         case SMU_SOCCLK:
369                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
370                 break;
371         default:
372                 return true;
373         }
374
375         if (!smu_feature_is_enabled(smu, feature_id))
376                 return false;
378
379         return true;
380 }
381
382
383 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
384                            bool gate)
385 {
386         int ret = 0;
387
388         mutex_lock(&smu->mutex);
389
390         switch (block_type) {
391         case AMD_IP_BLOCK_TYPE_UVD:
392                 ret = smu_dpm_set_uvd_enable(smu, gate);
393                 break;
394         case AMD_IP_BLOCK_TYPE_VCE:
395                 ret = smu_dpm_set_vce_enable(smu, gate);
396                 break;
397         case AMD_IP_BLOCK_TYPE_GFX:
398                 ret = smu_gfx_off_control(smu, gate);
399                 break;
400         case AMD_IP_BLOCK_TYPE_SDMA:
401                 ret = smu_powergate_sdma(smu, gate);
402                 break;
403         default:
404                 break;
405         }
406
407         mutex_unlock(&smu->mutex);
408
409         return ret;
410 }
411
412 int smu_get_power_num_states(struct smu_context *smu,
413                              struct pp_states_info *state_info)
414 {
415         if (!state_info)
416                 return -EINVAL;
417
418         /* power states are not supported */
419         memset(state_info, 0, sizeof(struct pp_states_info));
420         state_info->nums = 1;
421         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
422
423         return 0;
424 }
425
426 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
427                            void *data, uint32_t *size)
428 {
429         struct smu_power_context *smu_power = &smu->smu_power;
430         struct smu_power_gate *power_gate = &smu_power->power_gate;
431         int ret = 0;
432
433         if (!data || !size)
434                 return -EINVAL;
435
436         switch (sensor) {
437         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
438                 *((uint32_t *)data) = smu->pstate_sclk;
439                 *size = 4;
440                 break;
441         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
442                 *((uint32_t *)data) = smu->pstate_mclk;
443                 *size = 4;
444                 break;
445         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
446                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
447                 *size = 8;
448                 break;
449         case AMDGPU_PP_SENSOR_UVD_POWER:
450                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
451                 *size = 4;
452                 break;
453         case AMDGPU_PP_SENSOR_VCE_POWER:
454                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
455                 *size = 4;
456                 break;
457         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
458                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
459                 *size = 4;
460                 break;
461         default:
462                 ret = -EINVAL;
463                 break;
464         }
465
466         if (ret)
467                 *size = 0;
468
469         return ret;
470 }
471
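/*
 * Transfer a table between driver memory and the SMC: point the SMC at the
 * table's VRAM bo with SetDriverDramAddrHigh/Low, then trigger
 * TransferTableDram2Smu or TransferTableSmu2Dram depending on @drv2smu.
 */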
472 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
473                      void *table_data, bool drv2smu)
474 {
475         struct smu_table_context *smu_table = &smu->smu_table;
476         struct amdgpu_device *adev = smu->adev;
477         struct smu_table *table = NULL;
478         int ret = 0;
479         int table_id = smu_table_get_index(smu, table_index);
480
481         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
482                 return -EINVAL;
483
484         table = &smu_table->tables[table_index];
485
486         if (drv2smu)
487                 memcpy(table->cpu_addr, table_data, table->size);
488
489         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
490                                           upper_32_bits(table->mc_address));
491         if (ret)
492                 return ret;
493         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
494                                           lower_32_bits(table->mc_address));
495         if (ret)
496                 return ret;
497         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
498                                           SMU_MSG_TransferTableDram2Smu :
499                                           SMU_MSG_TransferTableSmu2Dram,
500                                           table_id | ((argument & 0xFFFF) << 16));
501         if (ret)
502                 return ret;
503
504         /* flush hdp cache */
505         adev->nbio.funcs->hdp_flush(adev, NULL);
506
507         if (!drv2smu)
508                 memcpy(table_data, table->cpu_addr, table->size);
509
510         return ret;
511 }
512
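/*
 * The software SMU path is the default for Arcturus and newer ASICs; on
 * Vega20 it is only used when explicitly requested with amdgpu_dpm=2.
 */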
513 bool is_support_sw_smu(struct amdgpu_device *adev)
514 {
515         if (adev->asic_type == CHIP_VEGA20)
516                 return (amdgpu_dpm == 2);
517         else if (adev->asic_type >= CHIP_ARCTURUS)
518                 return true;
519         else
520                 return false;
521 }
522
523 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
524 {
525         if (amdgpu_dpm != 1)
526                 return false;
527
528         if (adev->asic_type == CHIP_VEGA20)
529                 return true;
530
531         return false;
532 }
533
534 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
535 {
536         struct smu_table_context *smu_table = &smu->smu_table;
537         uint32_t powerplay_table_size;
538
539         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
540                 return -EINVAL;
541
542         mutex_lock(&smu->mutex);
543
544         if (smu_table->hardcode_pptable)
545                 *table = smu_table->hardcode_pptable;
546         else
547                 *table = smu_table->power_play_table;
548
549         powerplay_table_size = smu_table->power_play_table_size;
550
551         mutex_unlock(&smu->mutex);
552
553         return powerplay_table_size;
554 }
555
556 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
557 {
558         struct smu_table_context *smu_table = &smu->smu_table;
559         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
560         int ret = 0;
561
562         if (!smu->pm_enabled)
563                 return -EINVAL;
564         if (header->usStructureSize != size) {
565                 pr_err("pp table size does not match!\n");
566                 return -EIO;
567         }
568
569         mutex_lock(&smu->mutex);
570         if (!smu_table->hardcode_pptable)
571                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
572         if (!smu_table->hardcode_pptable) {
573                 ret = -ENOMEM;
574                 goto failed;
575         }
576
577         memcpy(smu_table->hardcode_pptable, buf, size);
578         smu_table->power_play_table = smu_table->hardcode_pptable;
579         smu_table->power_play_table_size = size;
580
581         ret = smu_reset(smu);
582         if (ret)
583                 pr_info("smu reset failed, ret = %d\n", ret);
584
585 failed:
586         mutex_unlock(&smu->mutex);
587         return ret;
588 }
589
590 int smu_feature_init_dpm(struct smu_context *smu)
591 {
592         struct smu_feature *feature = &smu->smu_feature;
593         int ret = 0;
594         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
595
596         if (!smu->pm_enabled)
597                 return ret;
598         mutex_lock(&feature->mutex);
599         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
600         mutex_unlock(&feature->mutex);
601
602         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
603                                              SMU_FEATURE_MAX/32);
604         if (ret)
605                 return ret;
606
607         mutex_lock(&feature->mutex);
608         bitmap_or(feature->allowed, feature->allowed,
609                       (unsigned long *)allowed_feature_mask,
610                       feature->feature_num);
611         mutex_unlock(&feature->mutex);
612
613         return ret;
614 }
615
616
617 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
618 {
619         struct amdgpu_device *adev = smu->adev;
620         struct smu_feature *feature = &smu->smu_feature;
621         int feature_id;
622         int ret = 0;
623
624         if (adev->flags & AMD_IS_APU)
625                 return 1;
626
627         feature_id = smu_feature_get_index(smu, mask);
628         if (feature_id < 0)
629                 return 0;
630
631         WARN_ON(feature_id > feature->feature_num);
632
633         mutex_lock(&feature->mutex);
634         ret = test_bit(feature_id, feature->enabled);
635         mutex_unlock(&feature->mutex);
636
637         return ret;
638 }
639
640 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
641                             bool enable)
642 {
643         struct smu_feature *feature = &smu->smu_feature;
644         int feature_id;
645
646         feature_id = smu_feature_get_index(smu, mask);
647         if (feature_id < 0)
648                 return -EINVAL;
649
650         WARN_ON(feature_id > feature->feature_num);
651
652         return smu_feature_update_enable_state(smu,
653                                                1ULL << feature_id,
654                                                enable);
655 }
656
657 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
658 {
659         struct smu_feature *feature = &smu->smu_feature;
660         int feature_id;
661         int ret = 0;
662
663         feature_id = smu_feature_get_index(smu, mask);
664         if (feature_id < 0)
665                 return 0;
666
667         WARN_ON(feature_id > feature->feature_num);
668
669         mutex_lock(&feature->mutex);
670         ret = test_bit(feature_id, feature->supported);
671         mutex_unlock(&feature->mutex);
672
673         return ret;
674 }
675
676 int smu_feature_set_supported(struct smu_context *smu,
677                               enum smu_feature_mask mask,
678                               bool enable)
679 {
680         struct smu_feature *feature = &smu->smu_feature;
681         int feature_id;
682         int ret = 0;
683
684         feature_id = smu_feature_get_index(smu, mask);
685         if (feature_id < 0)
686                 return -EINVAL;
687
688         WARN_ON(feature_id > feature->feature_num);
689
690         mutex_lock(&feature->mutex);
691         if (enable)
692                 test_and_set_bit(feature_id, feature->supported);
693         else
694                 test_and_clear_bit(feature_id, feature->supported);
695         mutex_unlock(&feature->mutex);
696
697         return ret;
698 }
699
700 static int smu_set_funcs(struct amdgpu_device *adev)
701 {
702         struct smu_context *smu = &adev->smu;
703
704         switch (adev->asic_type) {
705         case CHIP_VEGA20:
706         case CHIP_NAVI10:
707         case CHIP_NAVI14:
708         case CHIP_NAVI12:
709         case CHIP_ARCTURUS:
710                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
711                         smu->od_enabled = true;
712                 smu_v11_0_set_smu_funcs(smu);
713                 break;
714         case CHIP_RENOIR:
715                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
716                         smu->od_enabled = true;
717                 smu_v12_0_set_smu_funcs(smu);
718                 break;
719         default:
720                 return -EINVAL;
721         }
722
723         return 0;
724 }
725
726 static int smu_early_init(void *handle)
727 {
728         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
729         struct smu_context *smu = &adev->smu;
730
731         smu->adev = adev;
732         smu->pm_enabled = !!amdgpu_dpm;
733         smu->is_apu = false;
734         mutex_init(&smu->mutex);
735
736         return smu_set_funcs(adev);
737 }
738
739 static int smu_late_init(void *handle)
740 {
741         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
742         struct smu_context *smu = &adev->smu;
743
744         if (!smu->pm_enabled)
745                 return 0;
746
747         smu_handle_task(&adev->smu,
748                         smu->smu_dpm.dpm_level,
749                         AMD_PP_TASK_COMPLETE_INIT,
750                         false);
751
752         return 0;
753 }
754
755 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
756                             uint16_t *size, uint8_t *frev, uint8_t *crev,
757                             uint8_t **addr)
758 {
759         struct amdgpu_device *adev = smu->adev;
760         uint16_t data_start;
761
762         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
763                                            size, frev, crev, &data_start))
764                 return -EINVAL;
765
766         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
767
768         return 0;
769 }
770
771 static int smu_initialize_pptable(struct smu_context *smu)
772 {
773         /* TODO */
774         return 0;
775 }
776
777 static int smu_smc_table_sw_init(struct smu_context *smu)
778 {
779         int ret;
780
781         ret = smu_initialize_pptable(smu);
782         if (ret) {
783                 pr_err("Failed to initialize pptable!\n");
784                 return ret;
785         }
786
787         /**
788          * Create smu_table structure, and init smc tables such as
789          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
790          */
791         ret = smu_init_smc_tables(smu);
792         if (ret) {
793                 pr_err("Failed to init smc tables!\n");
794                 return ret;
795         }
796
797         /**
798          * Create the smu_power_context structure, and allocate the smu_dpm_context
799          * and context size needed to fill the smu_power_context data.
800          */
801         ret = smu_init_power(smu);
802         if (ret) {
803                 pr_err("Failed to init smu power!\n");
804                 return ret;
805         }
806
807         return 0;
808 }
809
810 static int smu_smc_table_sw_fini(struct smu_context *smu)
811 {
812         int ret;
813
814         ret = smu_fini_smc_tables(smu);
815         if (ret) {
816                 pr_err("smu_fini_smc_tables failed!\n");
817                 return ret;
818         }
819
820         return 0;
821 }
822
823 static int smu_sw_init(void *handle)
824 {
825         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
826         struct smu_context *smu = &adev->smu;
827         int ret;
828
829         smu->pool_size = adev->pm.smu_prv_buffer_size;
830         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
831         mutex_init(&smu->smu_feature.mutex);
832         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
833         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
834         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
835
836         mutex_init(&smu->smu_baco.mutex);
837         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
838         smu->smu_baco.platform_support = false;
839
840         mutex_init(&smu->sensor_lock);
841
842         smu->watermarks_bitmap = 0;
843         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
844         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
845
846         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
847         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
848         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
849         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
850         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
851         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
852         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
853         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
854
855         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
856         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
857         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
858         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
859         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
860         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
861         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
862         smu->display_config = &adev->pm.pm_display_cfg;
863
864         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
865         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
866         ret = smu_init_microcode(smu);
867         if (ret) {
868                 pr_err("Failed to load smu firmware!\n");
869                 return ret;
870         }
871
872         ret = smu_smc_table_sw_init(smu);
873         if (ret) {
874                 pr_err("Failed to sw init smc table!\n");
875                 return ret;
876         }
877
878         ret = smu_register_irq_handler(smu);
879         if (ret) {
880                 pr_err("Failed to register smc irq handler!\n");
881                 return ret;
882         }
883
884         return 0;
885 }
886
887 static int smu_sw_fini(void *handle)
888 {
889         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
890         struct smu_context *smu = &adev->smu;
891         int ret;
892
893         kfree(smu->irq_source);
894         smu->irq_source = NULL;
895
896         ret = smu_smc_table_sw_fini(smu);
897         if (ret) {
898                 pr_err("Failed to sw fini smc table!\n");
899                 return ret;
900         }
901
902         ret = smu_fini_power(smu);
903         if (ret) {
904                 pr_err("smu_fini_power failed!\n");
905                 return ret;
906         }
907
908         return 0;
909 }
910
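/*
 * Allocate a VRAM bo for every SMC table with a non-zero size; on failure,
 * free the bos that were already allocated.
 */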
911 static int smu_init_fb_allocations(struct smu_context *smu)
912 {
913         struct amdgpu_device *adev = smu->adev;
914         struct smu_table_context *smu_table = &smu->smu_table;
915         struct smu_table *tables = smu_table->tables;
916         int ret, i;
917
918         for (i = 0; i < SMU_TABLE_COUNT; i++) {
919                 if (tables[i].size == 0)
920                         continue;
921                 ret = amdgpu_bo_create_kernel(adev,
922                                               tables[i].size,
923                                               tables[i].align,
924                                               tables[i].domain,
925                                               &tables[i].bo,
926                                               &tables[i].mc_address,
927                                               &tables[i].cpu_addr);
928                 if (ret)
929                         goto failed;
930         }
931
932         return 0;
933 failed:
934         while (--i >= 0) {
935                 if (tables[i].size == 0)
936                         continue;
937                 amdgpu_bo_free_kernel(&tables[i].bo,
938                                       &tables[i].mc_address,
939                                       &tables[i].cpu_addr);
940
941         }
942         return ret;
943 }
944
945 static int smu_fini_fb_allocations(struct smu_context *smu)
946 {
947         struct smu_table_context *smu_table = &smu->smu_table;
948         struct smu_table *tables = smu_table->tables;
949         uint32_t i = 0;
950
951         if (!tables)
952                 return 0;
953
954         for (i = 0; i < SMU_TABLE_COUNT; i++) {
955                 if (tables[i].size == 0)
956                         continue;
957                 amdgpu_bo_free_kernel(&tables[i].bo,
958                                       &tables[i].mc_address,
959                                       &tables[i].cpu_addr);
960         }
961
962         return 0;
963 }
964
965 static int smu_smc_table_hw_init(struct smu_context *smu,
966                                  bool initialize)
967 {
968         struct amdgpu_device *adev = smu->adev;
969         int ret;
970
971         if (smu_is_dpm_running(smu) && adev->in_suspend) {
972                 pr_info("dpm has been enabled\n");
973                 return 0;
974         }
975
976         if (adev->asic_type != CHIP_ARCTURUS) {
977                 ret = smu_init_display_count(smu, 0);
978                 if (ret)
979                         return ret;
980         }
981
982         if (initialize) {
983                 /* get boot_values from vbios to set revision, gfxclk, etc. */
984                 ret = smu_get_vbios_bootup_values(smu);
985                 if (ret)
986                         return ret;
987
988                 ret = smu_setup_pptable(smu);
989                 if (ret)
990                         return ret;
991
992                 ret = smu_get_clk_info_from_vbios(smu);
993                 if (ret)
994                         return ret;
995
996                 /*
997                  * check that the format_revision in vbios matches the pptable header
998                  * version, and that the structure size is not 0.
999                  */
1000                 ret = smu_check_pptable(smu);
1001                 if (ret)
1002                         return ret;
1003
1004                 /*
1005                  * allocate vram bos to store smc table contents.
1006                  */
1007                 ret = smu_init_fb_allocations(smu);
1008                 if (ret)
1009                         return ret;
1010
1011                 /*
1012                  * Parse pptable format and fill PPTable_t smc_pptable to
1013                  * the smu_table_context structure, then read the smc_dpm_table from
1014                  * vbios and fill it into smc_pptable.
1015                  */
1016                 ret = smu_parse_pptable(smu);
1017                 if (ret)
1018                         return ret;
1019
1020                 /*
1021                  * Send msg GetDriverIfVersion to check if the return value matches
1022                  * the DRIVER_IF_VERSION in the smc header.
1023                  */
1024                 ret = smu_check_fw_version(smu);
1025                 if (ret)
1026                         return ret;
1027         }
1028
1029         /* smu_dump_pptable(smu); */
1030
1031         /*
1032          * Copy the pptable bo in vram to the smc with SMU MSGs such as
1033          * SetDriverDramAddr and TransferTableDram2Smu.
1034          */
1035         ret = smu_write_pptable(smu);
1036         if (ret)
1037                 return ret;
1038
1039         /* issue Run*Btc msg */
1040         ret = smu_run_btc(smu);
1041         if (ret)
1042                 return ret;
1043
1044         ret = smu_feature_set_allowed_mask(smu);
1045         if (ret)
1046                 return ret;
1047
1048         ret = smu_system_features_control(smu, true);
1049         if (ret)
1050                 return ret;
1051
1052         if (adev->asic_type != CHIP_ARCTURUS) {
1053                 ret = smu_override_pcie_parameters(smu);
1054                 if (ret)
1055                         return ret;
1056
1057                 ret = smu_notify_display_change(smu);
1058                 if (ret)
1059                         return ret;
1060
1061                 /*
1062                  * Set min deep sleep dcefclk with the bootup value from vbios via
1063                  * SetMinDeepSleepDcefclk MSG.
1064                  */
1065                 ret = smu_set_min_dcef_deep_sleep(smu);
1066                 if (ret)
1067                         return ret;
1068         }
1069
1070         /*
1071          * Set initial values (from vbios) in the dpm tables context, such as
1072          * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
1073          * type of clock.
1074          */
1075         if (initialize) {
1076                 ret = smu_populate_smc_tables(smu);
1077                 if (ret)
1078                         return ret;
1079
1080                 ret = smu_init_max_sustainable_clocks(smu);
1081                 if (ret)
1082                         return ret;
1083         }
1084
1085         ret = smu_set_default_od_settings(smu, initialize);
1086         if (ret)
1087                 return ret;
1088
1089         if (initialize) {
1090                 ret = smu_populate_umd_state_clk(smu);
1091                 if (ret)
1092                         return ret;
1093
1094                 ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
1095                 if (ret)
1096                         return ret;
1097         }
1098
1099         /*
1100          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1101          */
1102         ret = smu_set_tool_table_location(smu);
1103
1104         if (!smu_is_dpm_running(smu))
1105                 pr_info("dpm has been disabled\n");
1106
1107         return ret;
1108 }
1109
1110 /**
1111  * smu_alloc_memory_pool - allocate memory pool in the system memory
1112  *
1113  * @smu: smu_context pointer
1114  *
1115  * This memory pool will be used by the SMC; the SetSystemVirtualDramAddr
1116  * and DramLogSetDramAddr messages notify the SMC of its location.
1117  *
1118  * Returns 0 on success, error on failure.
1119  */
1120 static int smu_alloc_memory_pool(struct smu_context *smu)
1121 {
1122         struct amdgpu_device *adev = smu->adev;
1123         struct smu_table_context *smu_table = &smu->smu_table;
1124         struct smu_table *memory_pool = &smu_table->memory_pool;
1125         uint64_t pool_size = smu->pool_size;
1126         int ret = 0;
1127
1128         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1129                 return ret;
1130
1131         memory_pool->size = pool_size;
1132         memory_pool->align = PAGE_SIZE;
1133         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1134
1135         switch (pool_size) {
1136         case SMU_MEMORY_POOL_SIZE_256_MB:
1137         case SMU_MEMORY_POOL_SIZE_512_MB:
1138         case SMU_MEMORY_POOL_SIZE_1_GB:
1139         case SMU_MEMORY_POOL_SIZE_2_GB:
1140                 ret = amdgpu_bo_create_kernel(adev,
1141                                               memory_pool->size,
1142                                               memory_pool->align,
1143                                               memory_pool->domain,
1144                                               &memory_pool->bo,
1145                                               &memory_pool->mc_address,
1146                                               &memory_pool->cpu_addr);
1147                 break;
1148         default:
1149                 break;
1150         }
1151
1152         return ret;
1153 }
1154
1155 static int smu_free_memory_pool(struct smu_context *smu)
1156 {
1157         struct smu_table_context *smu_table = &smu->smu_table;
1158         struct smu_table *memory_pool = &smu_table->memory_pool;
1159         int ret = 0;
1160
1161         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1162                 return ret;
1163
1164         amdgpu_bo_free_kernel(&memory_pool->bo,
1165                               &memory_pool->mc_address,
1166                               &memory_pool->cpu_addr);
1167
1168         memset(memory_pool, 0, sizeof(struct smu_table));
1169
1170         return ret;
1171 }
1172
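/*
 * Make sure the SMC is up and running: load the microcode from the driver
 * when the PSP does not handle firmware loading, then check the firmware
 * status.
 */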
1173 static int smu_start_smc_engine(struct smu_context *smu)
1174 {
1175         struct amdgpu_device *adev = smu->adev;
1176         int ret = 0;
1177
1178         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1179                 if (adev->asic_type < CHIP_NAVI10) {
1180                         if (smu->funcs->load_microcode) {
1181                                 ret = smu->funcs->load_microcode(smu);
1182                                 if (ret)
1183                                         return ret;
1184                         }
1185                 }
1186         }
1187
1188         if (smu->funcs->check_fw_status) {
1189                 ret = smu->funcs->check_fw_status(smu);
1190                 if (ret)
1191                         pr_err("SMC is not ready\n");
1192         }
1193
1194         return ret;
1195 }
1196
1197 static int smu_hw_init(void *handle)
1198 {
1199         int ret;
1200         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1201         struct smu_context *smu = &adev->smu;
1202
1203         ret = smu_start_smc_engine(smu);
1204         if (ret) {
1205                 pr_err("SMU is not ready yet!\n");
1206                 return ret;
1207         }
1208
1209         if (adev->flags & AMD_IS_APU) {
1210                 smu_powergate_sdma(&adev->smu, false);
1211                 smu_powergate_vcn(&adev->smu, false);
1212                 smu_set_gfx_cgpg(&adev->smu, true);
1213         }
1214
1215         if (!smu->pm_enabled)
1216                 return 0;
1217
1218         ret = smu_feature_init_dpm(smu);
1219         if (ret)
1220                 goto failed;
1221
1222         ret = smu_smc_table_hw_init(smu, true);
1223         if (ret)
1224                 goto failed;
1225
1226         ret = smu_alloc_memory_pool(smu);
1227         if (ret)
1228                 goto failed;
1229
1230         /*
1231          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1232          * notify the SMC of the pool location.
1233          */
1234         ret = smu_notify_memory_pool_location(smu);
1235         if (ret)
1236                 goto failed;
1237
1238         ret = smu_start_thermal_control(smu);
1239         if (ret)
1240                 goto failed;
1241
1242         if (!smu->pm_enabled)
1243                 adev->pm.dpm_enabled = false;
1244         else
1245                 adev->pm.dpm_enabled = true;    /* TODO: set the dpm_enabled flag only once VCN and DAL DPM are workable */
1246
1247         pr_info("SMU is initialized successfully!\n");
1248
1249         return 0;
1250
1251 failed:
1252         return ret;
1253 }
1254
1255 static int smu_stop_dpms(struct smu_context *smu)
1256 {
1257         return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
1258 }
1259
1260 static int smu_hw_fini(void *handle)
1261 {
1262         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1263         struct smu_context *smu = &adev->smu;
1264         struct smu_table_context *table_context = &smu->smu_table;
1265         int ret = 0;
1266
1267         if (adev->flags & AMD_IS_APU) {
1268                 smu_powergate_sdma(&adev->smu, true);
1269                 smu_powergate_vcn(&adev->smu, true);
1270         }
1271
1272         ret = smu_stop_thermal_control(smu);
1273         if (ret) {
1274                 pr_warn("Failed to stop thermal control!\n");
1275                 return ret;
1276         }
1277
1278         ret = smu_stop_dpms(smu);
1279         if (ret) {
1280                 pr_warn("Failed to stop DPMs!\n");
1281                 return ret;
1282         }
1283
1284         kfree(table_context->driver_pptable);
1285         table_context->driver_pptable = NULL;
1286
1287         kfree(table_context->max_sustainable_clocks);
1288         table_context->max_sustainable_clocks = NULL;
1289
1290         kfree(table_context->overdrive_table);
1291         table_context->overdrive_table = NULL;
1292
1293         ret = smu_fini_fb_allocations(smu);
1294         if (ret)
1295                 return ret;
1296
1297         ret = smu_free_memory_pool(smu);
1298         if (ret)
1299                 return ret;
1300
1301         return 0;
1302 }
1303
1304 int smu_reset(struct smu_context *smu)
1305 {
1306         struct amdgpu_device *adev = smu->adev;
1307         int ret = 0;
1308
1309         ret = smu_hw_fini(adev);
1310         if (ret)
1311                 return ret;
1312
1313         ret = smu_hw_init(adev);
1314         if (ret)
1315                 return ret;
1316
1317         return ret;
1318 }
1319
1320 static int smu_suspend(void *handle)
1321 {
1322         int ret;
1323         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1324         struct smu_context *smu = &adev->smu;
1325         bool baco_feature_is_enabled = false;
1326
1327         if (!(adev->flags & AMD_IS_APU))
1328                 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1329
1330         ret = smu_system_features_control(smu, false);
1331         if (ret)
1332                 return ret;
1333
1334         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1335                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1336                 if (ret) {
1337                         pr_warn("Failed to enable the BACO feature, return %d\n", ret);
1338                         return ret;
1339                 }
1340         }
1341
1342         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1343
1344         if (adev->asic_type >= CHIP_NAVI10 &&
1345             adev->gfx.rlc.funcs->stop)
1346                 adev->gfx.rlc.funcs->stop(adev);
1347
1348         return 0;
1349 }
1350
1351 static int smu_resume(void *handle)
1352 {
1353         int ret;
1354         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1355         struct smu_context *smu = &adev->smu;
1356
1357         pr_info("SMU is resuming...\n");
1358
1359         ret = smu_start_smc_engine(smu);
1360         if (ret) {
1361                 pr_err("SMU is not ready yet!\n");
1362                 goto failed;
1363         }
1364
1365         ret = smu_smc_table_hw_init(smu, false);
1366         if (ret)
1367                 goto failed;
1368
1369         ret = smu_start_thermal_control(smu);
1370         if (ret)
1371                 goto failed;
1372
1373         if (smu->is_apu)
1374                 smu_set_gfx_cgpg(&adev->smu, true);
1375
1376         smu->disable_uclk_switch = 0;
1377
1378         pr_info("SMU is resumed successfully!\n");
1379
1380         return 0;
1381
1382 failed:
1383         return ret;
1384 }
1385
1386 int smu_display_configuration_change(struct smu_context *smu,
1387                                      const struct amd_pp_display_configuration *display_config)
1388 {
1389         int index = 0;
1390         int num_of_active_display = 0;
1391
1392         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1393                 return -EINVAL;
1394
1395         if (!display_config)
1396                 return -EINVAL;
1397
1398         mutex_lock(&smu->mutex);
1399
1400         if (smu->funcs->set_deep_sleep_dcefclk)
1401                 smu->funcs->set_deep_sleep_dcefclk(smu,
1402                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1403
1404         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1405                 if (display_config->displays[index].controller_id != 0)
1406                         num_of_active_display++;
1407         }
1408
1409         smu_set_active_display_count(smu, num_of_active_display);
1410
1411         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1412                            display_config->cpu_cc6_disable,
1413                            display_config->cpu_pstate_disable,
1414                            display_config->nb_pstate_switch_disable);
1415
1416         mutex_unlock(&smu->mutex);
1417
1418         return 0;
1419 }
1420
1421 static int smu_get_clock_info(struct smu_context *smu,
1422                               struct smu_clock_info *clk_info,
1423                               enum smu_perf_level_designation designation)
1424 {
1425         int ret;
1426         struct smu_performance_level level = {0};
1427
1428         if (!clk_info)
1429                 return -EINVAL;
1430
1431         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1432         if (ret)
1433                 return -EINVAL;
1434
1435         clk_info->min_mem_clk = level.memory_clock;
1436         clk_info->min_eng_clk = level.core_clock;
1437         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1438
1439         ret = smu_get_perf_level(smu, designation, &level);
1440         if (ret)
1441                 return -EINVAL;
1442
1443         clk_info->min_mem_clk = level.memory_clock;
1444         clk_info->min_eng_clk = level.core_clock;
1445         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1446
1447         return 0;
1448 }
1449
1450 int smu_get_current_clocks(struct smu_context *smu,
1451                            struct amd_pp_clock_info *clocks)
1452 {
1453         struct amd_pp_simple_clock_info simple_clocks = {0};
1454         struct smu_clock_info hw_clocks;
1455         int ret = 0;
1456
1457         if (!is_support_sw_smu(smu->adev))
1458                 return -EINVAL;
1459
1460         mutex_lock(&smu->mutex);
1461
1462         smu_get_dal_power_level(smu, &simple_clocks);
1463
1464         if (smu->support_power_containment)
1465                 ret = smu_get_clock_info(smu, &hw_clocks,
1466                                          PERF_LEVEL_POWER_CONTAINMENT);
1467         else
1468                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1469
1470         if (ret) {
1471                 pr_err("Error in smu_get_clock_info\n");
1472                 goto failed;
1473         }
1474
1475         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1476         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1477         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1478         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1479         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1480         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1481         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1482         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1483
1484         if (simple_clocks.level == 0)
1485                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1486         else
1487                 clocks->max_clocks_state = simple_clocks.level;
1488
1489         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1490                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1491                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1492         }
1493
1494 failed:
1495         mutex_unlock(&smu->mutex);
1496         return ret;
1497 }
1498
1499 static int smu_set_clockgating_state(void *handle,
1500                                      enum amd_clockgating_state state)
1501 {
1502         return 0;
1503 }
1504
1505 static int smu_set_powergating_state(void *handle,
1506                                      enum amd_powergating_state state)
1507 {
1508         return 0;
1509 }
1510
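/*
 * Entering a profiling (UMD pstate) level ungates GFX clock and power
 * gating so the forced clocks stay stable; leaving it restores the saved
 * level and re-enables gating.
 */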
1511 static int smu_enable_umd_pstate(void *handle,
1512                       enum amd_dpm_forced_level *level)
1513 {
1514         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1515                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1516                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1517                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1518
1519         struct smu_context *smu = (struct smu_context*)(handle);
1520         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1521
1522         if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1523                 return -EINVAL;
1524
1525         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1526                 /* enter umd pstate, save current level, disable gfx cg*/
1527                 if (*level & profile_mode_mask) {
1528                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1529                         smu_dpm_ctx->enable_umd_pstate = true;
1530                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1531                                                                AMD_IP_BLOCK_TYPE_GFX,
1532                                                                AMD_CG_STATE_UNGATE);
1533                         amdgpu_device_ip_set_powergating_state(smu->adev,
1534                                                                AMD_IP_BLOCK_TYPE_GFX,
1535                                                                AMD_PG_STATE_UNGATE);
1536                 }
1537         } else {
1538                 /* exit umd pstate, restore level, enable gfx cg*/
1539                 if (!(*level & profile_mode_mask)) {
1540                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1541                                 *level = smu_dpm_ctx->saved_dpm_level;
1542                         smu_dpm_ctx->enable_umd_pstate = false;
1543                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1544                                                                AMD_IP_BLOCK_TYPE_GFX,
1545                                                                AMD_CG_STATE_GATE);
1546                         amdgpu_device_ip_set_powergating_state(smu->adev,
1547                                                                AMD_IP_BLOCK_TYPE_GFX,
1548                                                                AMD_PG_STATE_GATE);
1549                 }
1550         }
1551
1552         return 0;
1553 }
1554
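/*
 * Fallback performance level handler, used when the ASIC specific
 * set_performance_level callback fails.
 */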
1555 static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1556 {
1557         int ret = 0;
1558         uint32_t sclk_mask, mclk_mask, soc_mask;
1559
1560         switch (level) {
1561         case AMD_DPM_FORCED_LEVEL_HIGH:
1562                 ret = smu_force_dpm_limit_value(smu, true);
1563                 break;
1564         case AMD_DPM_FORCED_LEVEL_LOW:
1565                 ret = smu_force_dpm_limit_value(smu, false);
1566                 break;
1567         case AMD_DPM_FORCED_LEVEL_AUTO:
1568         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1569                 ret = smu_unforce_dpm_levels(smu);
1570                 break;
1571         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1572         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1573         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1574                 ret = smu_get_profiling_clk_mask(smu, level,
1575                                                  &sclk_mask,
1576                                                  &mclk_mask,
1577                                                  &soc_mask);
1578                 if (ret)
1579                         return ret;
1580                 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1581                 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1582                 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
1583                 break;
1584         case AMD_DPM_FORCED_LEVEL_MANUAL:
1585         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1586         default:
1587                 break;
1588         }
1589         return ret;
1590 }
1591
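     /*
      * Re-evaluate the dynamic power state: propagate display config changes
      * (unless skipped), apply the clock adjust rules, move to the requested
      * performance level and reapply the highest-priority workload profile.
      */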
1592 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1593                                    enum amd_dpm_forced_level level,
1594                                    bool skip_display_settings)
1595 {
1596         int ret = 0;
1597         int index = 0;
1598         long workload;
1599         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1600
1601         if (!smu->pm_enabled)
1602                 return -EINVAL;
1603
1604         if (!skip_display_settings) {
1605                 ret = smu_display_config_changed(smu);
1606                 if (ret) {
1607                         pr_err("Failed to change display config!\n");
1608                         return ret;
1609                 }
1610         }
1611
1612         ret = smu_apply_clocks_adjust_rules(smu);
1613         if (ret) {
1614                 pr_err("Failed to apply clocks adjust rules!\n");
1615                 return ret;
1616         }
1617
1618         if (!skip_display_settings) {
1619                 ret = smu_notify_smc_dispaly_config(smu);
1620                 if (ret) {
1621                         pr_err("Failed to notify smc display config!\n");
1622                         return ret;
1623                 }
1624         }
1625
1626         if (smu_dpm_ctx->dpm_level != level) {
1627                 ret = smu_asic_set_performance_level(smu, level);
1628                 if (ret) {
1629                         ret = smu_default_set_performance_level(smu, level);
1630                         if (ret) {
1631                                 pr_err("Failed to set performance level!\n");
1632                                 return ret;
1633                         }
1634                 }
1635
1636                 /* update the saved copy */
1637                 smu_dpm_ctx->dpm_level = level;
1638         }
1639
1640         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1641                 index = fls(smu->workload_mask);
1642                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1643                 workload = smu->workload_setting[index];
1644
1645                 if (smu->power_profile_mode != workload)
1646                         smu_set_power_profile_mode(smu, &workload, 0, false);
1647         }
1648
1649         return ret;
1650 }
1651
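     /*
      * Entry point for power-management tasks issued by the display code and
      * the driver: run any task-specific preparation, then readjust the
      * dynamic power state. smu->mutex is taken here only when lock_needed
      * is true, so callers that already hold it can reuse this path.
      */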
1652 int smu_handle_task(struct smu_context *smu,
1653                     enum amd_dpm_forced_level level,
1654                     enum amd_pp_task task_id,
1655                     bool lock_needed)
1656 {
1657         int ret = 0;
1658
1659         if (lock_needed)
1660                 mutex_lock(&smu->mutex);
1661
1662         switch (task_id) {
1663         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1664                 ret = smu_pre_display_config_changed(smu);
1665                 if (ret)
1666                         goto out;
1667                 ret = smu_set_cpu_power_state(smu);
1668                 if (ret)
1669                         goto out;
1670                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1671                 break;
1672         case AMD_PP_TASK_COMPLETE_INIT:
1673         case AMD_PP_TASK_READJUST_POWER_STATE:
1674                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1675                 break;
1676         default:
1677                 break;
1678         }
1679
1680 out:
1681         if (lock_needed)
1682                 mutex_unlock(&smu->mutex);
1683
1684         return ret;
1685 }
1686
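     /*
      * Enable or disable one workload power profile. Active profiles are
      * tracked in a priority-ordered bitmask and the highest-priority one is
      * applied, unless the user has forced the manual DPM level.
      */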
1687 int smu_switch_power_profile(struct smu_context *smu,
1688                              enum PP_SMC_POWER_PROFILE type,
1689                              bool en)
1690 {
1691         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1692         long workload;
1693         uint32_t index;
1694
1695         if (!smu->pm_enabled)
1696                 return -EINVAL;
1697
1698         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1699                 return -EINVAL;
1700
1701         mutex_lock(&smu->mutex);
1702
1703         if (!en) {
1704                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1705                 index = fls(smu->workload_mask);
1706                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1707                 workload = smu->workload_setting[index];
1708         } else {
1709                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1710                 index = fls(smu->workload_mask);
1711                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1712                 workload = smu->workload_setting[index];
1713         }
1714
1715         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1716                 smu_set_power_profile_mode(smu, &workload, 0, false);
1717
1718         mutex_unlock(&smu->mutex);
1719
1720         return 0;
1721 }
1722
1723 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1724 {
1725         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1726         enum amd_dpm_forced_level level;
1727
1728         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1729                 return -EINVAL;
1730
1731         mutex_lock(&(smu->mutex));
1732         level = smu_dpm_ctx->dpm_level;
1733         mutex_unlock(&(smu->mutex));
1734
1735         return level;
1736 }
1737
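     /*
      * Force the requested DPM performance level: handle UMD pstate
      * entry/exit (including gfx clock/power gating) first, then readjust
      * the power state with smu->mutex already held.
      */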
1738 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1739 {
1740         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1741         int ret = 0;
1742
1743         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1744                 return -EINVAL;
1745
1746         mutex_lock(&smu->mutex);
1747
1748         ret = smu_enable_umd_pstate(smu, &level);
1749         if (ret) {
1750                 mutex_unlock(&smu->mutex);
1751                 return ret;
1752         }
1753
1754         ret = smu_handle_task(smu, level,
1755                               AMD_PP_TASK_READJUST_POWER_STATE,
1756                               false);
1757
1758         mutex_unlock(&smu->mutex);
1759
1760         return ret;
1761 }
1762
1763 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1764 {
1765         int ret = 0;
1766
1767         mutex_lock(&smu->mutex);
1768         ret = smu_init_display_count(smu, count);
1769         mutex_unlock(&smu->mutex);
1770
1771         return ret;
1772 }
1773
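     /*
      * Force the allowed DPM levels of a single clock domain via a bitmask;
      * only valid while the manual DPM level is selected.
      */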
1774 int smu_force_clk_levels(struct smu_context *smu,
1775                          enum smu_clk_type clk_type,
1776                          uint32_t mask,
1777                          bool lock_needed)
1778 {
1779         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1780         int ret = 0;
1781
1782         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1783                 pr_debug("force clock level is for dpm manual mode only.\n");
1784                 return -EINVAL;
1785         }
1786
1787         if (lock_needed)
1788                 mutex_lock(&smu->mutex);
1789
1790         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1791                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1792
1793         if (lock_needed)
1794                 mutex_unlock(&smu->mutex);
1795
1796         return ret;
1797 }
1798
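     /*
      * Tell the MP1 firmware to prepare for shutdown, unload or reset.
      * Returns success silently when PM is disabled or the ASIC does not
      * expose the corresponding message.
      */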
1799 int smu_set_mp1_state(struct smu_context *smu,
1800                       enum pp_mp1_state mp1_state)
1801 {
1802         uint16_t msg;
1803         int ret;
1804
1805         /*
1806          * The SMC is not fully ready. That may be
1807          * expected as the IP may be masked.
1808          * So, just return without error.
1809          */
1810         if (!smu->pm_enabled)
1811                 return 0;
1812
1813         mutex_lock(&smu->mutex);
1814
1815         switch (mp1_state) {
1816         case PP_MP1_STATE_SHUTDOWN:
1817                 msg = SMU_MSG_PrepareMp1ForShutdown;
1818                 break;
1819         case PP_MP1_STATE_UNLOAD:
1820                 msg = SMU_MSG_PrepareMp1ForUnload;
1821                 break;
1822         case PP_MP1_STATE_RESET:
1823                 msg = SMU_MSG_PrepareMp1ForReset;
1824                 break;
1825         case PP_MP1_STATE_NONE:
1826         default:
1827                 mutex_unlock(&smu->mutex);
1828                 return 0;
1829         }
1830
1831         /* some asics may not support those messages */
1832         if (smu_msg_get_index(smu, msg) < 0) {
1833                 mutex_unlock(&smu->mutex);
1834                 return 0;
1835         }
1836
1837         ret = smu_send_smc_msg(smu, msg);
1838         if (ret)
1839                 pr_err("[PrepareMp1] Failed!\n");
1840
1841         mutex_unlock(&smu->mutex);
1842
1843         return ret;
1844 }
1845
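     /*
      * Allow or disallow data fabric C-states through the ASIC-specific
      * callback; treated as a no-op when PM is disabled or the callback is
      * not implemented.
      */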
1846 int smu_set_df_cstate(struct smu_context *smu,
1847                       enum pp_df_cstate state)
1848 {
1849         int ret = 0;
1850
1851         /*
1852          * The SMC is not fully ready. That may be
1853          * expected as the IP may be masked.
1854          * So, just return without error.
1855          */
1856         if (!smu->pm_enabled)
1857                 return 0;
1858
1859         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1860                 return 0;
1861
1862         mutex_lock(&smu->mutex);
1863
1864         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1865         if (ret)
1866                 pr_err("[SetDfCstate] failed!\n");
1867
1868         mutex_unlock(&smu->mutex);
1869
1870         return ret;
1871 }
1872
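     /* Push the CPU copy of the watermarks table down to the SMU. */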
1873 int smu_write_watermarks_table(struct smu_context *smu)
1874 {
1875         int ret = 0;
1876         struct smu_table_context *smu_table = &smu->smu_table;
1877         struct smu_table *table = NULL;
1878
1879         table = &smu_table->tables[SMU_TABLE_WATERMARKS];
1880
1881         if (!table->cpu_addr)
1882                 return -EINVAL;
1883
1884         ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
1885                                 true);
1886
1887         return ret;
1888 }
1889
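     /*
      * Build the watermarks table from the display clock ranges and mark it
      * as present but not yet loaded. The table is only updated when DCEFCLK
      * and SOCCLK DPM are enabled and watermarks are not disabled.
      */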
1890 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1891                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1892 {
1893         int ret = 0;
1894         struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
1895         void *table = watermarks->cpu_addr;
1896
1897         mutex_lock(&smu->mutex);
1898
1899         if (!smu->disable_watermark &&
1900                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1901                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1902                 smu_set_watermarks_table(smu, table, clock_ranges);
1903                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1904                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1905         }
1906
1907         mutex_unlock(&smu->mutex);
1908
1909         return ret;
1910 }
1911
1912 const struct amd_ip_funcs smu_ip_funcs = {
1913         .name = "smu",
1914         .early_init = smu_early_init,
1915         .late_init = smu_late_init,
1916         .sw_init = smu_sw_init,
1917         .sw_fini = smu_sw_fini,
1918         .hw_init = smu_hw_init,
1919         .hw_fini = smu_hw_fini,
1920         .suspend = smu_suspend,
1921         .resume = smu_resume,
1922         .is_idle = NULL,
1923         .check_soft_reset = NULL,
1924         .wait_for_idle = NULL,
1925         .soft_reset = NULL,
1926         .set_clockgating_state = smu_set_clockgating_state,
1927         .set_powergating_state = smu_set_powergating_state,
1928         .enable_umd_pstate = smu_enable_umd_pstate,
1929 };
1930
1931 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1932 {
1933         .type = AMD_IP_BLOCK_TYPE_SMC,
1934         .major = 11,
1935         .minor = 0,
1936         .rev = 0,
1937         .funcs = &smu_ip_funcs,
1938 };
1939
1940 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1941 {
1942         .type = AMD_IP_BLOCK_TYPE_SMC,
1943         .major = 12,
1944         .minor = 0,
1945         .rev = 0,
1946         .funcs = &smu_ip_funcs,
1947 };
1948
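     /*
      * The exported helpers below are thin wrappers that take smu->mutex and
      * forward to the optional smu->funcs or smu->ppt_funcs callbacks when
      * they are implemented.
      */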
1949 int smu_load_microcode(struct smu_context *smu)
1950 {
1951         int ret = 0;
1952
1953         mutex_lock(&smu->mutex);
1954
1955         if (smu->funcs->load_microcode)
1956                 ret = smu->funcs->load_microcode(smu);
1957
1958         mutex_unlock(&smu->mutex);
1959
1960         return ret;
1961 }
1962
1963 int smu_check_fw_status(struct smu_context *smu)
1964 {
1965         int ret = 0;
1966
1967         mutex_lock(&smu->mutex);
1968
1969         if (smu->funcs->check_fw_status)
1970                 ret = smu->funcs->check_fw_status(smu);
1971
1972         mutex_unlock(&smu->mutex);
1973
1974         return ret;
1975 }
1976
1977 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
1978 {
1979         int ret = 0;
1980
1981         mutex_lock(&smu->mutex);
1982
1983         if (smu->funcs->set_gfx_cgpg)
1984                 ret = smu->funcs->set_gfx_cgpg(smu, enabled);
1985
1986         mutex_unlock(&smu->mutex);
1987
1988         return ret;
1989 }
1990
1991 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
1992 {
1993         int ret = 0;
1994
1995         mutex_lock(&smu->mutex);
1996
1997         if (smu->funcs->set_fan_speed_rpm)
1998                 ret = smu->funcs->set_fan_speed_rpm(smu, speed);
1999
2000         mutex_unlock(&smu->mutex);
2001
2002         return ret;
2003 }
2004
2005 int smu_get_power_limit(struct smu_context *smu,
2006                         uint32_t *limit,
2007                         bool def,
2008                         bool lock_needed)
2009 {
2010         int ret = 0;
2011
2012         if (lock_needed)
2013                 mutex_lock(&smu->mutex);
2014
2015         if (smu->ppt_funcs->get_power_limit)
2016                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2017
2018         if (lock_needed)
2019                 mutex_unlock(&smu->mutex);
2020
2021         return ret;
2022 }
2023
2024 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2025 {
2026         int ret = 0;
2027
2028         mutex_lock(&smu->mutex);
2029
2030         if (smu->funcs->set_power_limit)
2031                 ret = smu->funcs->set_power_limit(smu, limit);
2032
2033         mutex_unlock(&smu->mutex);
2034
2035         return ret;
2036 }
2037
2038 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2039 {
2040         int ret = 0;
2041
2042         mutex_lock(&smu->mutex);
2043
2044         if (smu->ppt_funcs->print_clk_levels)
2045                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2046
2047         mutex_unlock(&smu->mutex);
2048
2049         return ret;
2050 }
2051
2052 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2053 {
2054         int ret = 0;
2055
2056         mutex_lock(&smu->mutex);
2057
2058         if (smu->ppt_funcs->get_od_percentage)
2059                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2060
2061         mutex_unlock(&smu->mutex);
2062
2063         return ret;
2064 }
2065
2066 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2067 {
2068         int ret = 0;
2069
2070         mutex_lock(&smu->mutex);
2071
2072         if (smu->ppt_funcs->set_od_percentage)
2073                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2074
2075         mutex_unlock(&smu->mutex);
2076
2077         return ret;
2078 }
2079
2080 int smu_od_edit_dpm_table(struct smu_context *smu,
2081                           enum PP_OD_DPM_TABLE_COMMAND type,
2082                           long *input, uint32_t size)
2083 {
2084         int ret = 0;
2085
2086         mutex_lock(&smu->mutex);
2087
2088         if (smu->ppt_funcs->od_edit_dpm_table)
2089                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2090
2091         mutex_unlock(&smu->mutex);
2092
2093         return ret;
2094 }
2095
2096 int smu_read_sensor(struct smu_context *smu,
2097                     enum amd_pp_sensors sensor,
2098                     void *data, uint32_t *size)
2099 {
2100         int ret = 0;
2101
2102         mutex_lock(&smu->mutex);
2103
2104         if (smu->ppt_funcs->read_sensor)
2105                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2106
2107         mutex_unlock(&smu->mutex);
2108
2109         return ret;
2110 }
2111
2112 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2113 {
2114         int ret = 0;
2115
2116         mutex_lock(&smu->mutex);
2117
2118         if (smu->ppt_funcs->get_power_profile_mode)
2119                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2120
2121         mutex_unlock(&smu->mutex);
2122
2123         return ret;
2124 }
2125
2126 int smu_set_power_profile_mode(struct smu_context *smu,
2127                                long *param,
2128                                uint32_t param_size,
2129                                bool lock_needed)
2130 {
2131         int ret = 0;
2132
2133         if (lock_needed)
2134                 mutex_lock(&smu->mutex);
2135
2136         if (smu->ppt_funcs->set_power_profile_mode)
2137                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2138
2139         if (lock_needed)
2140                 mutex_unlock(&smu->mutex);
2141
2142         return ret;
2143 }
2144
2145
2146 int smu_get_fan_control_mode(struct smu_context *smu)
2147 {
2148         int ret = 0;
2149
2150         mutex_lock(&smu->mutex);
2151
2152         if (smu->funcs->get_fan_control_mode)
2153                 ret = smu->funcs->get_fan_control_mode(smu);
2154
2155         mutex_unlock(&smu->mutex);
2156
2157         return ret;
2158 }
2159
2160 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2161 {
2162         int ret = 0;
2163
2164         mutex_lock(&smu->mutex);
2165
2166         if (smu->funcs->set_fan_control_mode)
2167                 ret = smu->funcs->set_fan_control_mode(smu, value);
2168
2169         mutex_unlock(&smu->mutex);
2170
2171         return ret;
2172 }
2173
2174 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2175 {
2176         int ret = 0;
2177
2178         mutex_lock(&smu->mutex);
2179
2180         if (smu->ppt_funcs->get_fan_speed_percent)
2181                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2182
2183         mutex_unlock(&smu->mutex);
2184
2185         return ret;
2186 }
2187
2188 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2189 {
2190         int ret = 0;
2191
2192         mutex_lock(&smu->mutex);
2193
2194         if (smu->funcs->set_fan_speed_percent)
2195                 ret = smu->funcs->set_fan_speed_percent(smu, speed);
2196
2197         mutex_unlock(&smu->mutex);
2198
2199         return ret;
2200 }
2201
2202 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2203 {
2204         int ret = 0;
2205
2206         mutex_lock(&smu->mutex);
2207
2208         if (smu->ppt_funcs->get_fan_speed_rpm)
2209                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2210
2211         mutex_unlock(&smu->mutex);
2212
2213         return ret;
2214 }
2215
2216 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2217 {
2218         int ret = 0;
2219
2220         mutex_lock(&smu->mutex);
2221
2222         if (smu->funcs->set_deep_sleep_dcefclk)
2223                 ret = smu->funcs->set_deep_sleep_dcefclk(smu, clk);
2224
2225         mutex_unlock(&smu->mutex);
2226
2227         return ret;
2228 }
2229
2230 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2231 {
2232         int ret = 0;
2233
2234         mutex_lock(&smu->mutex);
2235
2236         if (smu->funcs->set_active_display_count)
2237                 ret = smu->funcs->set_active_display_count(smu, count);
2238
2239         mutex_unlock(&smu->mutex);
2240
2241         return ret;
2242 }
2243
2244 int smu_get_clock_by_type(struct smu_context *smu,
2245                           enum amd_pp_clock_type type,
2246                           struct amd_pp_clocks *clocks)
2247 {
2248         int ret = 0;
2249
2250         mutex_lock(&smu->mutex);
2251
2252         if (smu->funcs->get_clock_by_type)
2253                 ret = smu->funcs->get_clock_by_type(smu, type, clocks);
2254
2255         mutex_unlock(&smu->mutex);
2256
2257         return ret;
2258 }
2259
2260 int smu_get_max_high_clocks(struct smu_context *smu,
2261                             struct amd_pp_simple_clock_info *clocks)
2262 {
2263         int ret = 0;
2264
2265         mutex_lock(&smu->mutex);
2266
2267         if (smu->funcs->get_max_high_clocks)
2268                 ret = smu->funcs->get_max_high_clocks(smu, clocks);
2269
2270         mutex_unlock(&smu->mutex);
2271
2272         return ret;
2273 }
2274
2275 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2276                                        enum smu_clk_type clk_type,
2277                                        struct pp_clock_levels_with_latency *clocks)
2278 {
2279         int ret = 0;
2280
2281         mutex_lock(&smu->mutex);
2282
2283         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2284                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2285
2286         mutex_unlock(&smu->mutex);
2287
2288         return ret;
2289 }
2290
2291 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2292                                        enum amd_pp_clock_type type,
2293                                        struct pp_clock_levels_with_voltage *clocks)
2294 {
2295         int ret = 0;
2296
2297         mutex_lock(&smu->mutex);
2298
2299         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2300                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2301
2302         mutex_unlock(&smu->mutex);
2303
2304         return ret;
2305 }
2306
2307
2308 int smu_display_clock_voltage_request(struct smu_context *smu,
2309                                       struct pp_display_clock_request *clock_req)
2310 {
2311         int ret = 0;
2312
2313         mutex_lock(&smu->mutex);
2314
2315         if (smu->funcs->display_clock_voltage_request)
2316                 ret = smu->funcs->display_clock_voltage_request(smu, clock_req);
2317
2318         mutex_unlock(&smu->mutex);
2319
2320         return ret;
2321 }
2322
2323
2324 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2325 {
2326         int ret = -EINVAL;
2327
2328         mutex_lock(&smu->mutex);
2329
2330         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2331                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2332
2333         mutex_unlock(&smu->mutex);
2334
2335         return ret;
2336 }
2337
2338 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2339 {
2340         int ret = 0;
2341
2342         mutex_lock(&smu->mutex);
2343
2344         if (smu->funcs->notify_smu_enable_pwe)
2345                 ret = smu->funcs->notify_smu_enable_pwe(smu);
2346
2347         mutex_unlock(&smu->mutex);
2348
2349         return ret;
2350 }
2351
2352 int smu_set_xgmi_pstate(struct smu_context *smu,
2353                         uint32_t pstate)
2354 {
2355         int ret = 0;
2356
2357         mutex_lock(&smu->mutex);
2358
2359         if (smu->funcs->set_xgmi_pstate)
2360                 ret = smu->funcs->set_xgmi_pstate(smu, pstate);
2361
2362         mutex_unlock(&smu->mutex);
2363
2364         return ret;
2365 }
2366
2367 int smu_set_azalia_d3_pme(struct smu_context *smu)
2368 {
2369         int ret = 0;
2370
2371         mutex_lock(&smu->mutex);
2372
2373         if (smu->funcs->set_azalia_d3_pme)
2374                 ret = smu->funcs->set_azalia_d3_pme(smu);
2375
2376         mutex_unlock(&smu->mutex);
2377
2378         return ret;
2379 }
2380
2381 bool smu_baco_is_support(struct smu_context *smu)
2382 {
2383         bool ret = false;
2384
2385         mutex_lock(&smu->mutex);
2386
2387         if (smu->funcs->baco_is_support)
2388                 ret = smu->funcs->baco_is_support(smu);
2389
2390         mutex_unlock(&smu->mutex);
2391
2392         return ret;
2393 }
2394
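     /* Query the current BACO state; requires the baco_get_state callback. */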
2395 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2396 {
2397         if (!smu->funcs->baco_get_state)
2398                 return -EINVAL;
2399
2400         mutex_lock(&smu->mutex);
2401         *state = smu->funcs->baco_get_state(smu);
2402         mutex_unlock(&smu->mutex);
2403
2404         return 0;
2405 }
2406
2407 int smu_baco_reset(struct smu_context *smu)
2408 {
2409         int ret = 0;
2410
2411         mutex_lock(&smu->mutex);
2412
2413         if (smu->funcs->baco_reset)
2414                 ret = smu->funcs->baco_reset(smu);
2415
2416         mutex_unlock(&smu->mutex);
2417
2418         return ret;
2419 }
2420
2421 int smu_mode2_reset(struct smu_context *smu)
2422 {
2423         int ret = 0;
2424
2425         mutex_lock(&smu->mutex);
2426
2427         if (smu->funcs->mode2_reset)
2428                 ret = smu->funcs->mode2_reset(smu);
2429
2430         mutex_unlock(&smu->mutex);
2431
2432         return ret;
2433 }
2434
2435 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2436                                          struct pp_smu_nv_clock_table *max_clocks)
2437 {
2438         int ret = 0;
2439
2440         mutex_lock(&smu->mutex);
2441
2442         if (smu->funcs->get_max_sustainable_clocks_by_dc)
2443                 ret = smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2444
2445         mutex_unlock(&smu->mutex);
2446
2447         return ret;
2448 }
2449
2450 int smu_get_uclk_dpm_states(struct smu_context *smu,
2451                             unsigned int *clock_values_in_khz,
2452                             unsigned int *num_states)
2453 {
2454         int ret = 0;
2455
2456         mutex_lock(&smu->mutex);
2457
2458         if (smu->ppt_funcs->get_uclk_dpm_states)
2459                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2460
2461         mutex_unlock(&smu->mutex);
2462
2463         return ret;
2464 }
2465
2466 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2467 {
2468         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2469
2470         mutex_lock(&smu->mutex);
2471
2472         if (smu->ppt_funcs->get_current_power_state)
2473                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2474
2475         mutex_unlock(&smu->mutex);
2476
2477         return pm_state;
2478 }
2479
2480 int smu_get_dpm_clock_table(struct smu_context *smu,
2481                             struct dpm_clocks *clock_table)
2482 {
2483         int ret = 0;
2484
2485         mutex_lock(&smu->mutex);
2486
2487         if (smu->ppt_funcs->get_dpm_clock_table)
2488                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2489
2490         mutex_unlock(&smu->mutex);
2491
2492         return ret;
2493 }