drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "soc15_common.h"
30 #include "smu_v11_0.h"
31 #include "smu_v12_0.h"
32 #include "atom.h"
33 #include "amd_pcie.h"
34 #include "vega20_ppt.h"
35 #include "arcturus_ppt.h"
36 #include "navi10_ppt.h"
37 #include "renoir_ppt.h"
38
39 #undef __SMU_DUMMY_MAP
40 #define __SMU_DUMMY_MAP(type)   #type
41 static const char *__smu_message_names[] = {
42         SMU_MESSAGE_TYPES
43 };
44
45 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
46 {
47         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
48                 return "unknown smu message";
49         return __smu_message_names[type];
50 }
51
52 #undef __SMU_DUMMY_MAP
53 #define __SMU_DUMMY_MAP(fea)    #fea
54 static const char *__smu_feature_names[] = {
55         SMU_FEATURE_MASKS
56 };
57
58 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
59 {
60         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
61                 return "unknown smu feature";
62         return __smu_feature_names[feature];
63 }
64
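/*
 * Dump the enabled SMC feature mask into @buf: the raw high/low dwords first,
 * then one line per supported feature (sorted by hardware feature index) with
 * its enabled/disabled state. Returns the number of bytes written.
 */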
65 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
66 {
67         size_t size = 0;
68         int ret = 0, i = 0;
69         uint32_t feature_mask[2] = { 0 };
70         int32_t feature_index = 0;
71         uint32_t count = 0;
72         uint32_t sort_feature[SMU_FEATURE_COUNT];
73         uint64_t hw_feature_count = 0;
74
75         mutex_lock(&smu->mutex);
76
77         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
78         if (ret)
79                 goto failed;
80
81         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
82                         feature_mask[1], feature_mask[0]);
83
84         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
85                 feature_index = smu_feature_get_index(smu, i);
86                 if (feature_index < 0)
87                         continue;
88                 sort_feature[feature_index] = i;
89                 hw_feature_count++;
90         }
91
92         for (i = 0; i < hw_feature_count; i++) {
93                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
94                                count++,
95                                smu_get_feature_name(smu, sort_feature[i]),
96                                i,
97                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
98                                "enabled" : "disabled");
99         }
100
101 failed:
102         mutex_unlock(&smu->mutex);
103
104         return size;
105 }
106
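/*
 * Enable or disable the features in @feature_mask on the SMC via the
 * {Enable,Disable}SmuFeatures{Low,High} messages, then mirror the change in
 * the cached feature->enabled bitmap.
 */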
107 static int smu_feature_update_enable_state(struct smu_context *smu,
108                                            uint64_t feature_mask,
109                                            bool enabled)
110 {
111         struct smu_feature *feature = &smu->smu_feature;
112         uint32_t feature_low = 0, feature_high = 0;
113         int ret = 0;
114
115         if (!smu->pm_enabled)
116                 return ret;
117
118         feature_low = (feature_mask >> 0) & 0xffffffff;
119         feature_high = (feature_mask >> 32) & 0xffffffff;
120
121         if (enabled) {
122                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
123                                                   feature_low);
124                 if (ret)
125                         return ret;
126                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
127                                                   feature_high);
128                 if (ret)
129                         return ret;
130         } else {
131                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
132                                                   feature_low);
133                 if (ret)
134                         return ret;
135                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
136                                                   feature_high);
137                 if (ret)
138                         return ret;
139         }
140
141         mutex_lock(&feature->mutex);
142         if (enabled)
143                 bitmap_or(feature->enabled, feature->enabled,
144                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
145         else
146                 bitmap_andnot(feature->enabled, feature->enabled,
147                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
148         mutex_unlock(&feature->mutex);
149
150         return ret;
151 }
152
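/*
 * Apply a user-requested feature mask: compare @new_mask against the mask
 * currently enabled on the SMC and flip only the bits that differ.
 */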
153 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
154 {
155         int ret = 0;
156         uint32_t feature_mask[2] = { 0 };
157         uint64_t feature_2_enabled = 0;
158         uint64_t feature_2_disabled = 0;
159         uint64_t feature_enables = 0;
160
161         mutex_lock(&smu->mutex);
162
163         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
164         if (ret)
165                 goto out;
166
167         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
168
169         feature_2_enabled  = ~feature_enables & new_mask;
170         feature_2_disabled = feature_enables & ~new_mask;
171
172         if (feature_2_enabled) {
173                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
174                 if (ret)
175                         goto out;
176         }
177         if (feature_2_disabled) {
178                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
179                 if (ret)
180                         goto out;
181         }
182
183 out:
184         mutex_unlock(&smu->mutex);
185
186         return ret;
187 }
188
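/*
 * Query the driver interface version and/or the SMU firmware version from the
 * SMC via GetDriverIfVersion / GetSmuVersion. Either output pointer may be
 * NULL, but not both.
 */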
189 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
190 {
191         int ret = 0;
192
193         if (!if_version && !smu_version)
194                 return -EINVAL;
195
196         if (if_version) {
197                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
198                 if (ret)
199                         return ret;
200
201                 ret = smu_read_smc_arg(smu, if_version);
202                 if (ret)
203                         return ret;
204         }
205
206         if (smu_version) {
207                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
208                 if (ret)
209                         return ret;
210
211                 ret = smu_read_smc_arg(smu, smu_version);
212                 if (ret)
213                         return ret;
214         }
215
216         return ret;
217 }
218
219 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
220                             uint32_t min, uint32_t max)
221 {
222         int ret = 0;
223
224         if (min <= 0 && max <= 0)
225                 return -EINVAL;
226
227         if (!smu_clk_dpm_is_enabled(smu, clk_type))
228                 return 0;
229
230         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
231         return ret;
232 }
233
234 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
235                             uint32_t min, uint32_t max)
236 {
237         int ret = 0, clk_id = 0;
238         uint32_t param;
239
240         if (min <= 0 && max <= 0)
241                 return -EINVAL;
242
243         if (!smu_clk_dpm_is_enabled(smu, clk_type))
244                 return 0;
245
246         clk_id = smu_clk_get_index(smu, clk_type);
247         if (clk_id < 0)
248                 return clk_id;
249
250         if (max > 0) {
251                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
252                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
253                                                   param);
254                 if (ret)
255                         return ret;
256         }
257
258         if (min > 0) {
259                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
260                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
261                                                   param);
262                 if (ret)
263                         return ret;
264         }
265
266
267         return ret;
268 }
269
270 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
271                            uint32_t *min, uint32_t *max, bool lock_needed)
272 {
273         uint32_t clock_limit;
274         int ret = 0;
275
276         if (!min && !max)
277                 return -EINVAL;
278
279         if (lock_needed)
280                 mutex_lock(&smu->mutex);
281
282         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
283                 switch (clk_type) {
284                 case SMU_MCLK:
285                 case SMU_UCLK:
286                         clock_limit = smu->smu_table.boot_values.uclk;
287                         break;
288                 case SMU_GFXCLK:
289                 case SMU_SCLK:
290                         clock_limit = smu->smu_table.boot_values.gfxclk;
291                         break;
292                 case SMU_SOCCLK:
293                         clock_limit = smu->smu_table.boot_values.socclk;
294                         break;
295                 default:
296                         clock_limit = 0;
297                         break;
298                 }
299
300                 /* clock in MHz */
301                 if (min)
302                         *min = clock_limit / 100;
303                 if (max)
304                         *max = clock_limit / 100;
305         } else {
306                 /*
307                  * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
308                  * core driver, and add helpers for the parts that are common (SMU_v11_x | SMU_v12_x funcs).
309                  */
310                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
311         }
312
313         if (lock_needed)
314                 mutex_unlock(&smu->mutex);
315
316         return ret;
317 }
318
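/*
 * Read the clock frequency of DPM level @level for @clk_type via the
 * GetDpmFreqByIndex message. Level 0xff is used by smu_get_dpm_level_count()
 * below to query the number of levels instead.
 */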
319 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
320                               uint16_t level, uint32_t *value)
321 {
322         int ret = 0, clk_id = 0;
323         uint32_t param;
324
325         if (!value)
326                 return -EINVAL;
327
328         if (!smu_clk_dpm_is_enabled(smu, clk_type))
329                 return 0;
330
331         clk_id = smu_clk_get_index(smu, clk_type);
332         if (clk_id < 0)
333                 return clk_id;
334
335         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
336
337         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
338                                           param);
339         if (ret)
340                 return ret;
341
342         ret = smu_read_smc_arg(smu, &param);
343         if (ret)
344                 return ret;
345
346         /* BIT31: 0 - fine grained DPM, 1 - discrete DPM.
347          * We do not support it for now, so mask the bit off. */
348         *value = param & 0x7fffffff;
349
350         return ret;
351 }
352
353 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
354                             uint32_t *value)
355 {
356         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
357 }
358
359 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
360                             uint32_t *min_value, uint32_t *max_value)
361 {
362         int ret = 0;
363         uint32_t level_count = 0;
364
365         if (!min_value && !max_value)
366                 return -EINVAL;
367
368         if (min_value) {
369                 /* use the level 0 clock value as the min value by default */
370                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
371                 if (ret)
372                         return ret;
373         }
374
375         if (max_value) {
376                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
377                 if (ret)
378                         return ret;
379
380                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
381                 if (ret)
382                         return ret;
383         }
384
385         return ret;
386 }
387
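/*
 * Check whether DPM is enabled for the given clock type by testing the
 * corresponding DPM feature bit. Clock types without a mapping here are
 * treated as always enabled.
 */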
388 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
389 {
390         enum smu_feature_mask feature_id = 0;
391
392         switch (clk_type) {
393         case SMU_MCLK:
394         case SMU_UCLK:
395                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
396                 break;
397         case SMU_GFXCLK:
398         case SMU_SCLK:
399                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
400                 break;
401         case SMU_SOCCLK:
402                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
403                 break;
404         default:
405                 return true;
406         }
407
408         if (!smu_feature_is_enabled(smu, feature_id)) {
409                 return false;
410         }
411
412         return true;
413 }
414
415 /**
416  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
417  *
418  * @smu:        smu_context pointer
419  * @block_type: the IP block to power gate/ungate
420  * @gate:       to power gate if true, ungate otherwise
421  *
422  * This API uses no smu->mutex lock protection due to:
423  * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the caller
424  *    guarantees the call is free of race conditions.
425  * 2. Or it is called on a user request to change power_dpm_force_performance_level.
426  *    In that case, the smu->mutex protection is already enforced in the
427  *    parent API smu_force_performance_level of the call path.
428  */
429 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
430                            bool gate)
431 {
432         int ret = 0;
433
434         switch (block_type) {
435         case AMD_IP_BLOCK_TYPE_UVD:
436                 ret = smu_dpm_set_uvd_enable(smu, gate);
437                 break;
438         case AMD_IP_BLOCK_TYPE_VCE:
439                 ret = smu_dpm_set_vce_enable(smu, gate);
440                 break;
441         case AMD_IP_BLOCK_TYPE_GFX:
442                 ret = smu_gfx_off_control(smu, gate);
443                 break;
444         case AMD_IP_BLOCK_TYPE_SDMA:
445                 ret = smu_powergate_sdma(smu, gate);
446                 break;
447         case AMD_IP_BLOCK_TYPE_JPEG:
448                 ret = smu_dpm_set_jpeg_enable(smu, gate);
449                 break;
450         default:
451                 break;
452         }
453
454         return ret;
455 }
456
457 int smu_get_power_num_states(struct smu_context *smu,
458                              struct pp_states_info *state_info)
459 {
460         if (!state_info)
461                 return -EINVAL;
462
463         /* power states are not supported */
464         memset(state_info, 0, sizeof(struct pp_states_info));
465         state_info->nums = 1;
466         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
467
468         return 0;
469 }
470
471 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
472                            void *data, uint32_t *size)
473 {
474         struct smu_power_context *smu_power = &smu->smu_power;
475         struct smu_power_gate *power_gate = &smu_power->power_gate;
476         int ret = 0;
477
478         if (!data || !size)
479                 return -EINVAL;
480
481         switch (sensor) {
482         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
483                 *((uint32_t *)data) = smu->pstate_sclk;
484                 *size = 4;
485                 break;
486         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
487                 *((uint32_t *)data) = smu->pstate_mclk;
488                 *size = 4;
489                 break;
490         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
491                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
492                 *size = 8;
493                 break;
494         case AMDGPU_PP_SENSOR_UVD_POWER:
495                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
496                 *size = 4;
497                 break;
498         case AMDGPU_PP_SENSOR_VCE_POWER:
499                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
500                 *size = 4;
501                 break;
502         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
503                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
504                 *size = 4;
505                 break;
506         default:
507                 ret = -EINVAL;
508                 break;
509         }
510
511         if (ret)
512                 *size = 0;
513
514         return ret;
515 }
516
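/*
 * Transfer a driver table between system memory and the SMC: copy @table_data
 * into the table bo (for drv2smu), point the SMC at the bo via
 * SetDriverDramAddrHigh/Low, issue TransferTableDram2Smu or
 * TransferTableSmu2Dram, and flush the HDP cache. For smu2drv the bo contents
 * are copied back into @table_data afterwards.
 */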
517 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
518                      void *table_data, bool drv2smu)
519 {
520         struct smu_table_context *smu_table = &smu->smu_table;
521         struct amdgpu_device *adev = smu->adev;
522         struct smu_table *table = NULL;
523         int ret = 0;
524         int table_id = smu_table_get_index(smu, table_index);
525
526         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
527                 return -EINVAL;
528
529         table = &smu_table->tables[table_index];
530
531         if (drv2smu)
532                 memcpy(table->cpu_addr, table_data, table->size);
533
534         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
535                                           upper_32_bits(table->mc_address));
536         if (ret)
537                 return ret;
538         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
539                                           lower_32_bits(table->mc_address));
540         if (ret)
541                 return ret;
542         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
543                                           SMU_MSG_TransferTableDram2Smu :
544                                           SMU_MSG_TransferTableSmu2Dram,
545                                           table_id | ((argument & 0xFFFF) << 16));
546         if (ret)
547                 return ret;
548
549         /* flush hdp cache */
550         adev->nbio.funcs->hdp_flush(adev, NULL);
551
552         if (!drv2smu)
553                 memcpy(table_data, table->cpu_addr, table->size);
554
555         return ret;
556 }
557
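/*
 * Report whether the SW SMU path is used for this ASIC: Vega20 only with
 * amdgpu_dpm=2, Arcturus and newer by default (except SR-IOV VFs without
 * pp_one_vf mode), older ASICs not at all.
 */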
558 bool is_support_sw_smu(struct amdgpu_device *adev)
559 {
560         if (adev->asic_type == CHIP_VEGA20)
561                 return (amdgpu_dpm == 2) ? true : false;
562         else if (adev->asic_type >= CHIP_ARCTURUS) {
563                 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
564                         return false;
565                 else
566                         return true;
567         } else
568                 return false;
569 }
570
571 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
572 {
573         if (!is_support_sw_smu(adev))
574                 return false;
575
576         if (adev->asic_type == CHIP_VEGA20)
577                 return true;
578
579         return false;
580 }
581
582 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
583 {
584         struct smu_table_context *smu_table = &smu->smu_table;
585         uint32_t powerplay_table_size;
586
587         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
588                 return -EINVAL;
589
590         mutex_lock(&smu->mutex);
591
592         if (smu_table->hardcode_pptable)
593                 *table = smu_table->hardcode_pptable;
594         else
595                 *table = smu_table->power_play_table;
596
597         powerplay_table_size = smu_table->power_play_table_size;
598
599         mutex_unlock(&smu->mutex);
600
601         return powerplay_table_size;
602 }
603
604 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
605 {
606         struct smu_table_context *smu_table = &smu->smu_table;
607         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
608         int ret = 0;
609
610         if (!smu->pm_enabled)
611                 return -EINVAL;
612         if (header->usStructureSize != size) {
613                 pr_err("pp table size not matched!\n");
614                 return -EIO;
615         }
616
617         mutex_lock(&smu->mutex);
618         if (!smu_table->hardcode_pptable)
619                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
620         if (!smu_table->hardcode_pptable) {
621                 ret = -ENOMEM;
622                 goto failed;
623         }
624
625         memcpy(smu_table->hardcode_pptable, buf, size);
626         smu_table->power_play_table = smu_table->hardcode_pptable;
627         smu_table->power_play_table_size = size;
628
629         /*
630          * Special hw_fini action(for Navi1x, the DPMs disablement will be
631          * skipped) may be needed for custom pptable uploading.
632          */
633         smu->uploading_custom_pp_table = true;
634
635         ret = smu_reset(smu);
636         if (ret)
637                 pr_info("smu reset failed, ret = %d\n", ret);
638
639         smu->uploading_custom_pp_table = false;
640
641 failed:
642         mutex_unlock(&smu->mutex);
643         return ret;
644 }
645
646 int smu_feature_init_dpm(struct smu_context *smu)
647 {
648         struct smu_feature *feature = &smu->smu_feature;
649         int ret = 0;
650         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
651
652         if (!smu->pm_enabled)
653                 return ret;
654         mutex_lock(&feature->mutex);
655         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
656         mutex_unlock(&feature->mutex);
657
658         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
659                                              SMU_FEATURE_MAX/32);
660         if (ret)
661                 return ret;
662
663         mutex_lock(&feature->mutex);
664         bitmap_or(feature->allowed, feature->allowed,
665                       (unsigned long *)allowed_feature_mask,
666                       feature->feature_num);
667         mutex_unlock(&feature->mutex);
668
669         return ret;
670 }
671
672
673 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
674 {
675         struct smu_feature *feature = &smu->smu_feature;
676         int feature_id;
677         int ret = 0;
678
679         if (smu->is_apu)
680                 return 1;
681
682         feature_id = smu_feature_get_index(smu, mask);
683         if (feature_id < 0)
684                 return 0;
685
686         WARN_ON(feature_id > feature->feature_num);
687
688         mutex_lock(&feature->mutex);
689         ret = test_bit(feature_id, feature->enabled);
690         mutex_unlock(&feature->mutex);
691
692         return ret;
693 }
694
695 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
696                             bool enable)
697 {
698         struct smu_feature *feature = &smu->smu_feature;
699         int feature_id;
700
701         feature_id = smu_feature_get_index(smu, mask);
702         if (feature_id < 0)
703                 return -EINVAL;
704
705         WARN_ON(feature_id > feature->feature_num);
706
707         return smu_feature_update_enable_state(smu,
708                                                1ULL << feature_id,
709                                                enable);
710 }
711
712 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
713 {
714         struct smu_feature *feature = &smu->smu_feature;
715         int feature_id;
716         int ret = 0;
717
718         feature_id = smu_feature_get_index(smu, mask);
719         if (feature_id < 0)
720                 return 0;
721
722         WARN_ON(feature_id > feature->feature_num);
723
724         mutex_lock(&feature->mutex);
725         ret = test_bit(feature_id, feature->supported);
726         mutex_unlock(&feature->mutex);
727
728         return ret;
729 }
730
731 int smu_feature_set_supported(struct smu_context *smu,
732                               enum smu_feature_mask mask,
733                               bool enable)
734 {
735         struct smu_feature *feature = &smu->smu_feature;
736         int feature_id;
737         int ret = 0;
738
739         feature_id = smu_feature_get_index(smu, mask);
740         if (feature_id < 0)
741                 return -EINVAL;
742
743         WARN_ON(feature_id > feature->feature_num);
744
745         mutex_lock(&feature->mutex);
746         if (enable)
747                 test_and_set_bit(feature_id, feature->supported);
748         else
749                 test_and_clear_bit(feature_id, feature->supported);
750         mutex_unlock(&feature->mutex);
751
752         return ret;
753 }
754
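/*
 * Hook up the ASIC specific ppt function table and adjust the PP feature
 * flags (GFXOFF, overdrive) accordingly.
 */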
755 static int smu_set_funcs(struct amdgpu_device *adev)
756 {
757         struct smu_context *smu = &adev->smu;
758
759         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
760                 smu->od_enabled = true;
761
762         switch (adev->asic_type) {
763         case CHIP_VEGA20:
764                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
765                 vega20_set_ppt_funcs(smu);
766                 break;
767         case CHIP_NAVI10:
768         case CHIP_NAVI14:
769         case CHIP_NAVI12:
770                 navi10_set_ppt_funcs(smu);
771                 break;
772         case CHIP_ARCTURUS:
773                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
774                 arcturus_set_ppt_funcs(smu);
775                 /* OD is not supported on Arcturus */
776                 smu->od_enabled = false;
777                 break;
778         case CHIP_RENOIR:
779                 renoir_set_ppt_funcs(smu);
780                 break;
781         default:
782                 return -EINVAL;
783         }
784
785         return 0;
786 }
787
788 static int smu_early_init(void *handle)
789 {
790         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
791         struct smu_context *smu = &adev->smu;
792
793         smu->adev = adev;
794         smu->pm_enabled = !!amdgpu_dpm;
795         smu->is_apu = false;
796         mutex_init(&smu->mutex);
797
798         return smu_set_funcs(adev);
799 }
800
801 static int smu_late_init(void *handle)
802 {
803         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
804         struct smu_context *smu = &adev->smu;
805
806         if (!smu->pm_enabled)
807                 return 0;
808
809         smu_handle_task(&adev->smu,
810                         smu->smu_dpm.dpm_level,
811                         AMD_PP_TASK_COMPLETE_INIT,
812                         false);
813
814         return 0;
815 }
816
817 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
818                             uint16_t *size, uint8_t *frev, uint8_t *crev,
819                             uint8_t **addr)
820 {
821         struct amdgpu_device *adev = smu->adev;
822         uint16_t data_start;
823
824         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
825                                            size, frev, crev, &data_start))
826                 return -EINVAL;
827
828         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
829
830         return 0;
831 }
832
833 static int smu_initialize_pptable(struct smu_context *smu)
834 {
835         /* TODO */
836         return 0;
837 }
838
839 static int smu_smc_table_sw_init(struct smu_context *smu)
840 {
841         int ret;
842
843         ret = smu_initialize_pptable(smu);
844         if (ret) {
845                 pr_err("Failed to init smu_initialize_pptable!\n");
846                 return ret;
847         }
848
849         /**
850          * Create smu_table structure, and init smc tables such as
851          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
852          */
853         ret = smu_init_smc_tables(smu);
854         if (ret) {
855                 pr_err("Failed to init smc tables!\n");
856                 return ret;
857         }
858
859         /**
860          * Create the smu_power_context structure, allocate the smu_dpm_context
861          * and set the context size used to fill in the smu_power_context data.
862          */
863         ret = smu_init_power(smu);
864         if (ret) {
865                 pr_err("Failed to init smu_init_power!\n");
866                 return ret;
867         }
868
869         return 0;
870 }
871
872 static int smu_smc_table_sw_fini(struct smu_context *smu)
873 {
874         int ret;
875
876         ret = smu_fini_smc_tables(smu);
877         if (ret) {
878                 pr_err("Failed to fini smc tables!\n");
879                 return ret;
880         }
881
882         return 0;
883 }
884
885 static int smu_sw_init(void *handle)
886 {
887         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
888         struct smu_context *smu = &adev->smu;
889         int ret;
890
891         smu->pool_size = adev->pm.smu_prv_buffer_size;
892         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
893         mutex_init(&smu->smu_feature.mutex);
894         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
895         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
896         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
897
898         mutex_init(&smu->smu_baco.mutex);
899         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
900         smu->smu_baco.platform_support = false;
901
902         mutex_init(&smu->sensor_lock);
903         mutex_init(&smu->metrics_lock);
904
905         smu->watermarks_bitmap = 0;
906         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
907         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
908
909         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
910         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
911         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
912         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
913         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
914         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
915         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
916         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
917
918         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
919         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
920         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
921         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
922         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
923         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
924         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
925         smu->display_config = &adev->pm.pm_display_cfg;
926
927         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
928         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
929         ret = smu_init_microcode(smu);
930         if (ret) {
931                 pr_err("Failed to load smu firmware!\n");
932                 return ret;
933         }
934
935         ret = smu_smc_table_sw_init(smu);
936         if (ret) {
937                 pr_err("Failed to sw init smc table!\n");
938                 return ret;
939         }
940
941         ret = smu_register_irq_handler(smu);
942         if (ret) {
943                 pr_err("Failed to register smc irq handler!\n");
944                 return ret;
945         }
946
947         return 0;
948 }
949
950 static int smu_sw_fini(void *handle)
951 {
952         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
953         struct smu_context *smu = &adev->smu;
954         int ret;
955
956         kfree(smu->irq_source);
957         smu->irq_source = NULL;
958
959         ret = smu_smc_table_sw_fini(smu);
960         if (ret) {
961                 pr_err("Failed to sw fini smc table!\n");
962                 return ret;
963         }
964
965         ret = smu_fini_power(smu);
966         if (ret) {
967                 pr_err("Failed to fini smu power!\n");
968                 return ret;
969         }
970
971         return 0;
972 }
973
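/*
 * Allocate a kernel bo (in the table's requested domain) for every SMC table
 * with a non-zero size; on failure, free the bos already allocated.
 */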
974 static int smu_init_fb_allocations(struct smu_context *smu)
975 {
976         struct amdgpu_device *adev = smu->adev;
977         struct smu_table_context *smu_table = &smu->smu_table;
978         struct smu_table *tables = smu_table->tables;
979         int ret, i;
980
981         for (i = 0; i < SMU_TABLE_COUNT; i++) {
982                 if (tables[i].size == 0)
983                         continue;
984                 ret = amdgpu_bo_create_kernel(adev,
985                                               tables[i].size,
986                                               tables[i].align,
987                                               tables[i].domain,
988                                               &tables[i].bo,
989                                               &tables[i].mc_address,
990                                               &tables[i].cpu_addr);
991                 if (ret)
992                         goto failed;
993         }
994
995         return 0;
996 failed:
997         while (--i >= 0) {
998                 if (tables[i].size == 0)
999                         continue;
1000                 amdgpu_bo_free_kernel(&tables[i].bo,
1001                                       &tables[i].mc_address,
1002                                       &tables[i].cpu_addr);
1003
1004         }
1005         return ret;
1006 }
1007
1008 static int smu_fini_fb_allocations(struct smu_context *smu)
1009 {
1010         struct smu_table_context *smu_table = &smu->smu_table;
1011         struct smu_table *tables = smu_table->tables;
1012         uint32_t i = 0;
1013
1014         if (!tables)
1015                 return 0;
1016
1017         for (i = 0; i < SMU_TABLE_COUNT; i++) {
1018                 if (tables[i].size == 0)
1019                         continue;
1020                 amdgpu_bo_free_kernel(&tables[i].bo,
1021                                       &tables[i].mc_address,
1022                                       &tables[i].cpu_addr);
1023         }
1024
1025         return 0;
1026 }
1027
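/*
 * Bring the SMC tables up on the hardware. On first init, boot values and the
 * pptable are pulled from vbios and the table bos are allocated; afterwards
 * the pptable is uploaded, the allowed features are enabled and the initial
 * clock/OD settings are applied. On resume (initialize == false) only the
 * runtime steps are redone.
 */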
1028 static int smu_smc_table_hw_init(struct smu_context *smu,
1029                                  bool initialize)
1030 {
1031         struct amdgpu_device *adev = smu->adev;
1032         int ret;
1033
1034         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1035                 pr_info("dpm has been enabled\n");
1036                 return 0;
1037         }
1038
1039         if (adev->asic_type != CHIP_ARCTURUS) {
1040                 ret = smu_init_display_count(smu, 0);
1041                 if (ret)
1042                         return ret;
1043         }
1044
1045         if (initialize) {
1046                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1047                 ret = smu_get_vbios_bootup_values(smu);
1048                 if (ret)
1049                         return ret;
1050
1051                 ret = smu_setup_pptable(smu);
1052                 if (ret)
1053                         return ret;
1054
1055                 ret = smu_get_clk_info_from_vbios(smu);
1056                 if (ret)
1057                         return ret;
1058
1059                 /*
1060                  * Check that the format_revision in vbios is not older than the
1061                  * pptable header version, and that the structure size is not 0.
1062                  */
1063                 ret = smu_check_pptable(smu);
1064                 if (ret)
1065                         return ret;
1066
1067                 /*
1068                  * allocate vram bos to store smc table contents.
1069                  */
1070                 ret = smu_init_fb_allocations(smu);
1071                 if (ret)
1072                         return ret;
1073
1074                 /*
1075                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
1076                  * smu_table_context structure. Then read the smc_dpm_table from vbios
1077                  * and merge it into smc_pptable.
1078                  */
1079                 ret = smu_parse_pptable(smu);
1080                 if (ret)
1081                         return ret;
1082
1083                 /*
1084                  * Send the GetDriverIfVersion message and check that the returned value
1085                  * matches the DRIVER_IF_VERSION in the smc header.
1086                  */
1087                 ret = smu_check_fw_version(smu);
1088                 if (ret)
1089                         return ret;
1090         }
1091
1092         /* smu_dump_pptable(smu); */
1093         if (!amdgpu_sriov_vf(adev)) {
1094                 /*
1095                  * Copy pptable bo in the vram to smc with SMU MSGs such as
1096                  * SetDriverDramAddr and TransferTableDram2Smu.
1097                  */
1098                 ret = smu_write_pptable(smu);
1099                 if (ret)
1100                         return ret;
1101
1102                 /* issue Run*Btc msg */
1103                 ret = smu_run_btc(smu);
1104                 if (ret)
1105                         return ret;
1106                 ret = smu_feature_set_allowed_mask(smu);
1107                 if (ret)
1108                         return ret;
1109
1110                 ret = smu_system_features_control(smu, true);
1111                 if (ret)
1112                         return ret;
1113         }
1114         if (adev->asic_type != CHIP_ARCTURUS) {
1115                 ret = smu_notify_display_change(smu);
1116                 if (ret)
1117                         return ret;
1118
1119                 /*
1120                  * Set the min deep sleep dcefclk to the bootup value from vbios via
1121                  * the SetMinDeepSleepDcefclk message.
1122                  */
1123                 ret = smu_set_min_dcef_deep_sleep(smu);
1124                 if (ret)
1125                         return ret;
1126         }
1127
1128         /*
1129          * Set initial values (from vbios) in the dpm tables context, such as
1130          * gfxclk, memclk and dcefclk, and enable the DPM feature for each
1131          * clock type.
1132          */
1133         if (initialize) {
1134                 ret = smu_populate_smc_tables(smu);
1135                 if (ret)
1136                         return ret;
1137
1138                 ret = smu_init_max_sustainable_clocks(smu);
1139                 if (ret)
1140                         return ret;
1141         }
1142
1143         if (adev->asic_type != CHIP_ARCTURUS) {
1144                 ret = smu_override_pcie_parameters(smu);
1145                 if (ret)
1146                         return ret;
1147         }
1148
1149         ret = smu_set_default_od_settings(smu, initialize);
1150         if (ret)
1151                 return ret;
1152
1153         if (initialize) {
1154                 ret = smu_populate_umd_state_clk(smu);
1155                 if (ret)
1156                         return ret;
1157
1158                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1159                 if (ret)
1160                         return ret;
1161         }
1162
1163         /*
1164          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1165          */
1166         if (!amdgpu_sriov_vf(adev)) {
1167                 ret = smu_set_tool_table_location(smu);
1168         }
1169         if (!smu_is_dpm_running(smu))
1170                 pr_info("dpm has been disabled\n");
1171
1172         return ret;
1173 }
1174
1175 /**
1176  * smu_alloc_memory_pool - allocate memory pool in the system memory
1177  *
1178  * @smu: smu_context pointer
1179  *
1180  * This memory pool is for SMC use; its location is reported to the SMC via
1181  * the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1182  *
1183  * Returns 0 on success, error on failure.
1184  */
1185 static int smu_alloc_memory_pool(struct smu_context *smu)
1186 {
1187         struct amdgpu_device *adev = smu->adev;
1188         struct smu_table_context *smu_table = &smu->smu_table;
1189         struct smu_table *memory_pool = &smu_table->memory_pool;
1190         uint64_t pool_size = smu->pool_size;
1191         int ret = 0;
1192
1193         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1194                 return ret;
1195
1196         memory_pool->size = pool_size;
1197         memory_pool->align = PAGE_SIZE;
1198         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1199
1200         switch (pool_size) {
1201         case SMU_MEMORY_POOL_SIZE_256_MB:
1202         case SMU_MEMORY_POOL_SIZE_512_MB:
1203         case SMU_MEMORY_POOL_SIZE_1_GB:
1204         case SMU_MEMORY_POOL_SIZE_2_GB:
1205                 ret = amdgpu_bo_create_kernel(adev,
1206                                               memory_pool->size,
1207                                               memory_pool->align,
1208                                               memory_pool->domain,
1209                                               &memory_pool->bo,
1210                                               &memory_pool->mc_address,
1211                                               &memory_pool->cpu_addr);
1212                 break;
1213         default:
1214                 break;
1215         }
1216
1217         return ret;
1218 }
1219
1220 static int smu_free_memory_pool(struct smu_context *smu)
1221 {
1222         struct smu_table_context *smu_table = &smu->smu_table;
1223         struct smu_table *memory_pool = &smu_table->memory_pool;
1224
1225         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1226                 return 0;
1227
1228         amdgpu_bo_free_kernel(&memory_pool->bo,
1229                               &memory_pool->mc_address,
1230                               &memory_pool->cpu_addr);
1231
1232         memset(memory_pool, 0, sizeof(struct smu_table));
1233
1234         return 0;
1235 }
1236
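/*
 * Load the SMU microcode directly if the PSP is not handling the firmware
 * load (pre-Navi10 only), then verify that the SMC firmware is up and running.
 */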
1237 static int smu_start_smc_engine(struct smu_context *smu)
1238 {
1239         struct amdgpu_device *adev = smu->adev;
1240         int ret = 0;
1241
1242         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1243                 if (adev->asic_type < CHIP_NAVI10) {
1244                         if (smu->ppt_funcs->load_microcode) {
1245                                 ret = smu->ppt_funcs->load_microcode(smu);
1246                                 if (ret)
1247                                         return ret;
1248                         }
1249                 }
1250         }
1251
1252         if (smu->ppt_funcs->check_fw_status) {
1253                 ret = smu->ppt_funcs->check_fw_status(smu);
1254                 if (ret)
1255                         pr_err("SMC is not ready\n");
1256         }
1257
1258         return ret;
1259 }
1260
1261 static int smu_hw_init(void *handle)
1262 {
1263         int ret;
1264         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1265         struct smu_context *smu = &adev->smu;
1266
1267         ret = smu_start_smc_engine(smu);
1268         if (ret) {
1269                 pr_err("SMU is not ready yet!\n");
1270                 return ret;
1271         }
1272
1273         if (smu->is_apu) {
1274                 smu_powergate_sdma(&adev->smu, false);
1275                 smu_powergate_vcn(&adev->smu, false);
1276                 smu_powergate_jpeg(&adev->smu, false);
1277                 smu_set_gfx_cgpg(&adev->smu, true);
1278         }
1279
1280         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1281                 return 0;
1282
1283         if (!smu->pm_enabled)
1284                 return 0;
1285
1286         ret = smu_feature_init_dpm(smu);
1287         if (ret)
1288                 goto failed;
1289
1290         ret = smu_smc_table_hw_init(smu, true);
1291         if (ret)
1292                 goto failed;
1293
1294         ret = smu_alloc_memory_pool(smu);
1295         if (ret)
1296                 goto failed;
1297
1298         /*
1299          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1300          * notify the SMC of the pool location.
1301          */
1302         ret = smu_notify_memory_pool_location(smu);
1303         if (ret)
1304                 goto failed;
1305
1306         ret = smu_start_thermal_control(smu);
1307         if (ret)
1308                 goto failed;
1309
1310         if (!smu->pm_enabled)
1311                 adev->pm.dpm_enabled = false;
1312         else
1313                 adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag once VCN and DAL DPM are workable */
1314
1315         pr_info("SMU is initialized successfully!\n");
1316
1317         return 0;
1318
1319 failed:
1320         return ret;
1321 }
1322
1323 static int smu_stop_dpms(struct smu_context *smu)
1324 {
1325         return smu_system_features_control(smu, false);
1326 }
1327
1328 static int smu_hw_fini(void *handle)
1329 {
1330         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1331         struct smu_context *smu = &adev->smu;
1332         struct smu_table_context *table_context = &smu->smu_table;
1333         int ret = 0;
1334
1335         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1336                 return 0;
1337
1338         if (smu->is_apu) {
1339                 smu_powergate_sdma(&adev->smu, true);
1340                 smu_powergate_vcn(&adev->smu, true);
1341                 smu_powergate_jpeg(&adev->smu, true);
1342         }
1343
1344         if (!amdgpu_sriov_vf(adev)) {
1345                 ret = smu_stop_thermal_control(smu);
1346                 if (ret) {
1347                         pr_warn("Failed to stop thermal control!\n");
1348                         return ret;
1349                 }
1350
1351                 /*
1352                  * For custom pptable uploading, skip the DPM features
1353                  * disable process on Navi1x ASICs.
1354                  *   - As the gfx related features are under control of
1355                  *     RLC on those ASICs. RLC reinitialization will be
1356                  *     needed to reenable them. That will cost much more
1357                  *     efforts.
1358                  *
1359                  *   - SMU firmware can handle the DPM reenablement
1360                  *     properly.
1361                  */
1362                 if (!smu->uploading_custom_pp_table ||
1363                                 !((adev->asic_type >= CHIP_NAVI10) &&
1364                                         (adev->asic_type <= CHIP_NAVI12))) {
1365                         ret = smu_stop_dpms(smu);
1366                         if (ret) {
1367                                 pr_warn("Failed to stop DPMs!\n");
1368                                 return ret;
1369                         }
1370                 }
1371         }
1372
1373         kfree(table_context->driver_pptable);
1374         table_context->driver_pptable = NULL;
1375
1376         kfree(table_context->max_sustainable_clocks);
1377         table_context->max_sustainable_clocks = NULL;
1378
1379         kfree(table_context->overdrive_table);
1380         table_context->overdrive_table = NULL;
1381
1382         ret = smu_fini_fb_allocations(smu);
1383         if (ret)
1384                 return ret;
1385
1386         ret = smu_free_memory_pool(smu);
1387         if (ret)
1388                 return ret;
1389
1390         return 0;
1391 }
1392
1393 int smu_reset(struct smu_context *smu)
1394 {
1395         struct amdgpu_device *adev = smu->adev;
1396         int ret = 0;
1397
1398         ret = smu_hw_fini(adev);
1399         if (ret)
1400                 return ret;
1401
1402         ret = smu_hw_init(adev);
1403         if (ret)
1404                 return ret;
1405
1406         return ret;
1407 }
1408
1409 static int smu_suspend(void *handle)
1410 {
1411         int ret;
1412         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1413         struct smu_context *smu = &adev->smu;
1414         bool baco_feature_is_enabled = false;
1415
1416         if (!smu->is_apu)
1417                 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1418
1419         ret = smu_system_features_control(smu, false);
1420         if (ret)
1421                 return ret;
1422
1423         if (baco_feature_is_enabled) {
1424                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1425                 if (ret) {
1426                         pr_warn("Failed to set BACO feature enabled, ret = %d\n", ret);
1427                         return ret;
1428                 }
1429         }
1430
1431         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1432
1433         if (adev->asic_type >= CHIP_NAVI10 &&
1434             adev->gfx.rlc.funcs->stop)
1435                 adev->gfx.rlc.funcs->stop(adev);
1436         if (smu->is_apu)
1437                 smu_set_gfx_cgpg(&adev->smu, false);
1438
1439         return 0;
1440 }
1441
1442 static int smu_resume(void *handle)
1443 {
1444         int ret;
1445         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1446         struct smu_context *smu = &adev->smu;
1447
1448         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1449                 return 0;
1450
1451         if (!smu->pm_enabled)
1452                 return 0;
1453
1454         pr_info("SMU is resuming...\n");
1455
1456         ret = smu_start_smc_engine(smu);
1457         if (ret) {
1458                 pr_err("SMU is not ready yet!\n");
1459                 goto failed;
1460         }
1461
1462         ret = smu_smc_table_hw_init(smu, false);
1463         if (ret)
1464                 goto failed;
1465
1466         ret = smu_start_thermal_control(smu);
1467         if (ret)
1468                 goto failed;
1469
1470         if (smu->is_apu)
1471                 smu_set_gfx_cgpg(&adev->smu, true);
1472
1473         smu->disable_uclk_switch = 0;
1474
1475         pr_info("SMU is resumed successfully!\n");
1476
1477         return 0;
1478
1479 failed:
1480         return ret;
1481 }
1482
1483 int smu_display_configuration_change(struct smu_context *smu,
1484                                      const struct amd_pp_display_configuration *display_config)
1485 {
1486         int index = 0;
1487         int num_of_active_display = 0;
1488
1489         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1490                 return -EINVAL;
1491
1492         if (!display_config)
1493                 return -EINVAL;
1494
1495         mutex_lock(&smu->mutex);
1496
1497         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1498                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1499                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1500
1501         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1502                 if (display_config->displays[index].controller_id != 0)
1503                         num_of_active_display++;
1504         }
1505
1506         smu_set_active_display_count(smu, num_of_active_display);
1507
1508         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1509                            display_config->cpu_cc6_disable,
1510                            display_config->cpu_pstate_disable,
1511                            display_config->nb_pstate_switch_disable);
1512
1513         mutex_unlock(&smu->mutex);
1514
1515         return 0;
1516 }
1517
1518 static int smu_get_clock_info(struct smu_context *smu,
1519                               struct smu_clock_info *clk_info,
1520                               enum smu_perf_level_designation designation)
1521 {
1522         int ret;
1523         struct smu_performance_level level = {0};
1524
1525         if (!clk_info)
1526                 return -EINVAL;
1527
1528         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1529         if (ret)
1530                 return -EINVAL;
1531
1532         clk_info->min_mem_clk = level.memory_clock;
1533         clk_info->min_eng_clk = level.core_clock;
1534         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1535
1536         ret = smu_get_perf_level(smu, designation, &level);
1537         if (ret)
1538                 return -EINVAL;
1539
1540         clk_info->min_mem_clk = level.memory_clock;
1541         clk_info->min_eng_clk = level.core_clock;
1542         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1543
1544         return 0;
1545 }
1546
1547 int smu_get_current_clocks(struct smu_context *smu,
1548                            struct amd_pp_clock_info *clocks)
1549 {
1550         struct amd_pp_simple_clock_info simple_clocks = {0};
1551         struct smu_clock_info hw_clocks;
1552         int ret = 0;
1553
1554         if (!is_support_sw_smu(smu->adev))
1555                 return -EINVAL;
1556
1557         mutex_lock(&smu->mutex);
1558
1559         smu_get_dal_power_level(smu, &simple_clocks);
1560
1561         if (smu->support_power_containment)
1562                 ret = smu_get_clock_info(smu, &hw_clocks,
1563                                          PERF_LEVEL_POWER_CONTAINMENT);
1564         else
1565                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1566
1567         if (ret) {
1568                 pr_err("Error in smu_get_clock_info\n");
1569                 goto failed;
1570         }
1571
1572         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1573         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1574         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1575         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1576         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1577         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1578         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1579         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1580
1581         if (simple_clocks.level == 0)
1582                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1583         else
1584                 clocks->max_clocks_state = simple_clocks.level;
1585
1586         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1587                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1588                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1589         }
1590
1591 failed:
1592         mutex_unlock(&smu->mutex);
1593         return ret;
1594 }
1595
1596 static int smu_set_clockgating_state(void *handle,
1597                                      enum amd_clockgating_state state)
1598 {
1599         return 0;
1600 }
1601
1602 static int smu_set_powergating_state(void *handle,
1603                                      enum amd_powergating_state state)
1604 {
1605         return 0;
1606 }
1607
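/*
 * smu_enable_umd_pstate() handles entry to and exit from the UMD
 * (user-mode driver) profiling pstates.  On entry it saves the current
 * DPM level and ungates GFX clock/power gating so fixed-clock profiling
 * is not disturbed; on exit it restores the saved level and re-enables
 * gating.  The *level argument is both input and output:
 * AMD_DPM_FORCED_LEVEL_PROFILE_EXIT is translated back into the level
 * that was saved when the pstate was entered.
 */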
1608 static int smu_enable_umd_pstate(void *handle,
1609                       enum amd_dpm_forced_level *level)
1610 {
1611         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1612                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1613                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1614                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1615
1616         struct smu_context *smu = (struct smu_context*)(handle);
1617         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1618
1619         if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1620                 return -EINVAL;
1621
1622         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1623                 /* enter umd pstate, save current level, disable gfx cg */
1624                 if (*level & profile_mode_mask) {
1625                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1626                         smu_dpm_ctx->enable_umd_pstate = true;
1627                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1628                                                                AMD_IP_BLOCK_TYPE_GFX,
1629                                                                AMD_CG_STATE_UNGATE);
1630                         amdgpu_device_ip_set_powergating_state(smu->adev,
1631                                                                AMD_IP_BLOCK_TYPE_GFX,
1632                                                                AMD_PG_STATE_UNGATE);
1633                 }
1634         } else {
1635                 /* exit umd pstate, restore level, enable gfx cg */
1636                 if (!(*level & profile_mode_mask)) {
1637                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1638                                 *level = smu_dpm_ctx->saved_dpm_level;
1639                         smu_dpm_ctx->enable_umd_pstate = false;
1640                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1641                                                                AMD_IP_BLOCK_TYPE_GFX,
1642                                                                AMD_CG_STATE_GATE);
1643                         amdgpu_device_ip_set_powergating_state(smu->adev,
1644                                                                AMD_IP_BLOCK_TYPE_GFX,
1645                                                                AMD_PG_STATE_GATE);
1646                 }
1647         }
1648
1649         return 0;
1650 }
1651
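/*
 * smu_adjust_power_state_dynamic() is the common re-evaluation path: it
 * optionally pushes the new display configuration to the SMC, applies
 * the ASIC clock-adjust rules, switches the performance level when it
 * changed, and finally re-selects the highest-priority workload from
 * workload_mask unless the level is AMD_DPM_FORCED_LEVEL_MANUAL.
 */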
1652 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1653                                    enum amd_dpm_forced_level level,
1654                                    bool skip_display_settings)
1655 {
1656         int ret = 0;
1657         int index = 0;
1658         long workload;
1659         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1660
1661         if (!smu->pm_enabled)
1662                 return -EINVAL;
1663
1664         if (!skip_display_settings) {
1665                 ret = smu_display_config_changed(smu);
1666                 if (ret) {
1667                         pr_err("Failed to change display config!");
1668                         return ret;
1669                 }
1670         }
1671
1672         ret = smu_apply_clocks_adjust_rules(smu);
1673         if (ret) {
1674                 pr_err("Failed to apply clocks adjust rules!");
1675                 return ret;
1676         }
1677
1678         if (!skip_display_settings) {
1679                 ret = smu_notify_smc_display_config(smu);
1680                 if (ret) {
1681                         pr_err("Failed to notify smc display config!");
1682                         return ret;
1683                 }
1684         }
1685
1686         if (smu_dpm_ctx->dpm_level != level) {
1687                 ret = smu_asic_set_performance_level(smu, level);
1688                 if (ret) {
1689                         pr_err("Failed to set performance level!");
1690                         return ret;
1691                 }
1692
1693                 /* update the saved copy */
1694                 smu_dpm_ctx->dpm_level = level;
1695         }
1696
1697         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1698                 index = fls(smu->workload_mask);
1699                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1700                 workload = smu->workload_setting[index];
1701
1702                 if (smu->power_profile_mode != workload)
1703                         smu_set_power_profile_mode(smu, &workload, 0, false);
1704         }
1705
1706         return ret;
1707 }
1708
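/*
 * smu_handle_task() dispatches the amd_pp_task requests coming from the
 * display and power layers.  A minimal sketch of re-evaluating the power
 * state after an event, assuming the caller does not already hold
 * smu->mutex (lock_needed = true):
 *
 *     smu_handle_task(smu, smu_get_performance_level(smu),
 *                     AMD_PP_TASK_READJUST_POWER_STATE, true);
 */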
1709 int smu_handle_task(struct smu_context *smu,
1710                     enum amd_dpm_forced_level level,
1711                     enum amd_pp_task task_id,
1712                     bool lock_needed)
1713 {
1714         int ret = 0;
1715
1716         if (lock_needed)
1717                 mutex_lock(&smu->mutex);
1718
1719         switch (task_id) {
1720         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1721                 ret = smu_pre_display_config_changed(smu);
1722                 if (ret)
1723                         goto out;
1724                 ret = smu_set_cpu_power_state(smu);
1725                 if (ret)
1726                         goto out;
1727                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1728                 break;
1729         case AMD_PP_TASK_COMPLETE_INIT:
1730         case AMD_PP_TASK_READJUST_POWER_STATE:
1731                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1732                 break;
1733         default:
1734                 break;
1735         }
1736
1737 out:
1738         if (lock_needed)
1739                 mutex_unlock(&smu->mutex);
1740
1741         return ret;
1742 }
1743
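/*
 * smu_switch_power_profile() maintains a bitmask of requested power
 * profiles, indexed by their priority, and programs the highest-priority
 * profile that is still enabled (unless the DPM level is manual).  A
 * hedged example, assuming PP_SMC_POWER_PROFILE_COMPUTE is defined for
 * the ASIC in use:
 *
 *     smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *     ... run compute work ...
 *     smu_switch_power_profile(smu, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */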
1744 int smu_switch_power_profile(struct smu_context *smu,
1745                              enum PP_SMC_POWER_PROFILE type,
1746                              bool en)
1747 {
1748         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1749         long workload;
1750         uint32_t index;
1751
1752         if (!smu->pm_enabled)
1753                 return -EINVAL;
1754
1755         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1756                 return -EINVAL;
1757
1758         mutex_lock(&smu->mutex);
1759
1760         if (!en) {
1761                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1762                 index = fls(smu->workload_mask);
1763                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1764                 workload = smu->workload_setting[index];
1765         } else {
1766                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1767                 index = fls(smu->workload_mask);
1768                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1769                 workload = smu->workload_setting[index];
1770         }
1771
1772         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1773                 smu_set_power_profile_mode(smu, &workload, 0, false);
1774
1775         mutex_unlock(&smu->mutex);
1776
1777         return 0;
1778 }
1779
1780 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1781 {
1782         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1783         enum amd_dpm_forced_level level;
1784
1785         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1786                 return -EINVAL;
1787
1788         mutex_lock(&(smu->mutex));
1789         level = smu_dpm_ctx->dpm_level;
1790         mutex_unlock(&(smu->mutex));
1791
1792         return level;
1793 }
1794
1795 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1796 {
1797         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1798         int ret = 0;
1799
1800         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1801                 return -EINVAL;
1802
1803         mutex_lock(&smu->mutex);
1804
1805         ret = smu_enable_umd_pstate(smu, &level);
1806         if (ret) {
1807                 mutex_unlock(&smu->mutex);
1808                 return ret;
1809         }
1810
1811         ret = smu_handle_task(smu, level,
1812                               AMD_PP_TASK_READJUST_POWER_STATE,
1813                               false);
1814
1815         mutex_unlock(&smu->mutex);
1816
1817         return ret;
1818 }
1819
1820 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1821 {
1822         int ret = 0;
1823
1824         mutex_lock(&smu->mutex);
1825         ret = smu_init_display_count(smu, count);
1826         mutex_unlock(&smu->mutex);
1827
1828         return ret;
1829 }
1830
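/*
 * smu_force_clk_levels() restricts a clock domain to the DPM levels
 * selected by @mask and is only honoured in AMD_DPM_FORCED_LEVEL_MANUAL
 * mode.  Illustrative call, assuming SMU_SCLK is a valid smu_clk_type
 * enumerator for the ASIC:
 *
 *     smu_force_clk_levels(smu, SMU_SCLK, BIT(0) | BIT(1), true);
 */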
1831 int smu_force_clk_levels(struct smu_context *smu,
1832                          enum smu_clk_type clk_type,
1833                          uint32_t mask,
1834                          bool lock_needed)
1835 {
1836         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1837         int ret = 0;
1838
1839         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1840                 pr_debug("force clock level is for dpm manual mode only.\n");
1841                 return -EINVAL;
1842         }
1843
1844         if (lock_needed)
1845                 mutex_lock(&smu->mutex);
1846
1847         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1848                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1849
1850         if (lock_needed)
1851                 mutex_unlock(&smu->mutex);
1852
1853         return ret;
1854 }
1855
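/*
 * smu_set_mp1_state() translates the requested MP1 state into the
 * matching PrepareMp1For* message and sends it to the SMC.  States the
 * firmware does not implement are silently ignored, e.g. on driver
 * unload:
 *
 *     smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
 */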
1856 int smu_set_mp1_state(struct smu_context *smu,
1857                       enum pp_mp1_state mp1_state)
1858 {
1859         uint16_t msg;
1860         int ret;
1861
1862         /*
1863          * The SMC may not be fully ready yet. That is
1864          * expected when the IP block is masked, so just
1865          * return without reporting an error.
1866          */
1867         if (!smu->pm_enabled)
1868                 return 0;
1869
1870         mutex_lock(&smu->mutex);
1871
1872         switch (mp1_state) {
1873         case PP_MP1_STATE_SHUTDOWN:
1874                 msg = SMU_MSG_PrepareMp1ForShutdown;
1875                 break;
1876         case PP_MP1_STATE_UNLOAD:
1877                 msg = SMU_MSG_PrepareMp1ForUnload;
1878                 break;
1879         case PP_MP1_STATE_RESET:
1880                 msg = SMU_MSG_PrepareMp1ForReset;
1881                 break;
1882         case PP_MP1_STATE_NONE:
1883         default:
1884                 mutex_unlock(&smu->mutex);
1885                 return 0;
1886         }
1887
1888         /* some ASICs may not support these messages */
1889         if (smu_msg_get_index(smu, msg) < 0) {
1890                 mutex_unlock(&smu->mutex);
1891                 return 0;
1892         }
1893
1894         ret = smu_send_smc_msg(smu, msg);
1895         if (ret)
1896                 pr_err("[PrepareMp1] Failed!\n");
1897
1898         mutex_unlock(&smu->mutex);
1899
1900         return ret;
1901 }
1902
1903 int smu_set_df_cstate(struct smu_context *smu,
1904                       enum pp_df_cstate state)
1905 {
1906         int ret = 0;
1907
1908         /*
1909          * The SMC may not be fully ready yet. That is
1910          * expected when the IP block is masked, so just
1911          * return without reporting an error.
1912          */
1913         if (!smu->pm_enabled)
1914                 return 0;
1915
1916         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1917                 return 0;
1918
1919         mutex_lock(&smu->mutex);
1920
1921         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1922         if (ret)
1923                 pr_err("[SetDfCstate] failed!\n");
1924
1925         mutex_unlock(&smu->mutex);
1926
1927         return ret;
1928 }
1929
1930 int smu_write_watermarks_table(struct smu_context *smu)
1931 {
1932         int ret = 0;
1933         struct smu_table_context *smu_table = &smu->smu_table;
1934         struct smu_table *table = NULL;
1935
1936         table = &smu_table->tables[SMU_TABLE_WATERMARKS];
1937
1938         if (!table->cpu_addr)
1939                 return -EINVAL;
1940
1941         ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
1942                                 true);
1943
1944         return ret;
1945 }
1946
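/*
 * smu_set_watermarks_for_clock_ranges() copies the display watermark
 * ranges into the CPU-side SMU_TABLE_WATERMARKS buffer and marks it as
 * existing but not yet loaded; smu_write_watermarks_table() above pushes
 * that buffer to the SMC.  The copy is only performed when watermarks
 * are not disabled and both DCEFCLK and SOCCLK DPM are enabled.
 */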
1947 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1948                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1949 {
1950         struct smu_table *watermarks;
1951         void *table;
1952
1953         if (!smu->smu_table.tables)
1954                 return 0;
1955
1956         watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
1957         table = watermarks->cpu_addr;
1958
1959         mutex_lock(&smu->mutex);
1960
1961         if (!smu->disable_watermark &&
1962                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1963                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1964                 smu_set_watermarks_table(smu, table, clock_ranges);
1965                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1966                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1967         }
1968
1969         mutex_unlock(&smu->mutex);
1970
1971         return 0;
1972 }
1973
1974 const struct amd_ip_funcs smu_ip_funcs = {
1975         .name = "smu",
1976         .early_init = smu_early_init,
1977         .late_init = smu_late_init,
1978         .sw_init = smu_sw_init,
1979         .sw_fini = smu_sw_fini,
1980         .hw_init = smu_hw_init,
1981         .hw_fini = smu_hw_fini,
1982         .suspend = smu_suspend,
1983         .resume = smu_resume,
1984         .is_idle = NULL,
1985         .check_soft_reset = NULL,
1986         .wait_for_idle = NULL,
1987         .soft_reset = NULL,
1988         .set_clockgating_state = smu_set_clockgating_state,
1989         .set_powergating_state = smu_set_powergating_state,
1990         .enable_umd_pstate = smu_enable_umd_pstate,
1991 };
1992
1993 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1994 {
1995         .type = AMD_IP_BLOCK_TYPE_SMC,
1996         .major = 11,
1997         .minor = 0,
1998         .rev = 0,
1999         .funcs = &smu_ip_funcs,
2000 };
2001
2002 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2003 {
2004         .type = AMD_IP_BLOCK_TYPE_SMC,
2005         .major = 12,
2006         .minor = 0,
2007         .rev = 0,
2008         .funcs = &smu_ip_funcs,
2009 };
2010
2011 int smu_load_microcode(struct smu_context *smu)
2012 {
2013         int ret = 0;
2014
2015         mutex_lock(&smu->mutex);
2016
2017         if (smu->ppt_funcs->load_microcode)
2018                 ret = smu->ppt_funcs->load_microcode(smu);
2019
2020         mutex_unlock(&smu->mutex);
2021
2022         return ret;
2023 }
2024
2025 int smu_check_fw_status(struct smu_context *smu)
2026 {
2027         int ret = 0;
2028
2029         mutex_lock(&smu->mutex);
2030
2031         if (smu->ppt_funcs->check_fw_status)
2032                 ret = smu->ppt_funcs->check_fw_status(smu);
2033
2034         mutex_unlock(&smu->mutex);
2035
2036         return ret;
2037 }
2038
2039 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2040 {
2041         int ret = 0;
2042
2043         mutex_lock(&smu->mutex);
2044
2045         if (smu->ppt_funcs->set_gfx_cgpg)
2046                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2047
2048         mutex_unlock(&smu->mutex);
2049
2050         return ret;
2051 }
2052
2053 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2054 {
2055         int ret = 0;
2056
2057         mutex_lock(&smu->mutex);
2058
2059         if (smu->ppt_funcs->set_fan_speed_rpm)
2060                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2061
2062         mutex_unlock(&smu->mutex);
2063
2064         return ret;
2065 }
2066
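/*
 * smu_get_power_limit()/smu_set_power_limit() read and program the ASIC
 * power limit through the ppt_funcs.  A rough sketch that restores the
 * limit to its default value (assuming the @def flag selects the default
 * limit):
 *
 *     uint32_t limit = 0;
 *
 *     if (!smu_get_power_limit(smu, &limit, true, true))
 *         smu_set_power_limit(smu, limit);
 */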
2067 int smu_get_power_limit(struct smu_context *smu,
2068                         uint32_t *limit,
2069                         bool def,
2070                         bool lock_needed)
2071 {
2072         int ret = 0;
2073
2074         if (lock_needed)
2075                 mutex_lock(&smu->mutex);
2076
2077         if (smu->ppt_funcs->get_power_limit)
2078                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2079
2080         if (lock_needed)
2081                 mutex_unlock(&smu->mutex);
2082
2083         return ret;
2084 }
2085
2086 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2087 {
2088         int ret = 0;
2089
2090         mutex_lock(&smu->mutex);
2091
2092         if (smu->ppt_funcs->set_power_limit)
2093                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2094
2095         mutex_unlock(&smu->mutex);
2096
2097         return ret;
2098 }
2099
2100 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2101 {
2102         int ret = 0;
2103
2104         mutex_lock(&smu->mutex);
2105
2106         if (smu->ppt_funcs->print_clk_levels)
2107                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2108
2109         mutex_unlock(&smu->mutex);
2110
2111         return ret;
2112 }
2113
2114 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2115 {
2116         int ret = 0;
2117
2118         mutex_lock(&smu->mutex);
2119
2120         if (smu->ppt_funcs->get_od_percentage)
2121                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2122
2123         mutex_unlock(&smu->mutex);
2124
2125         return ret;
2126 }
2127
2128 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2129 {
2130         int ret = 0;
2131
2132         mutex_lock(&smu->mutex);
2133
2134         if (smu->ppt_funcs->set_od_percentage)
2135                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2136
2137         mutex_unlock(&smu->mutex);
2138
2139         return ret;
2140 }
2141
2142 int smu_od_edit_dpm_table(struct smu_context *smu,
2143                           enum PP_OD_DPM_TABLE_COMMAND type,
2144                           long *input, uint32_t size)
2145 {
2146         int ret = 0;
2147
2148         mutex_lock(&smu->mutex);
2149
2150         if (smu->ppt_funcs->od_edit_dpm_table)
2151                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2152
2153         mutex_unlock(&smu->mutex);
2154
2155         return ret;
2156 }
2157
2158 int smu_read_sensor(struct smu_context *smu,
2159                     enum amd_pp_sensors sensor,
2160                     void *data, uint32_t *size)
2161 {
2162         int ret = 0;
2163
2164         mutex_lock(&smu->mutex);
2165
2166         if (smu->ppt_funcs->read_sensor)
2167                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2168
2169         mutex_unlock(&smu->mutex);
2170
2171         return ret;
2172 }
2173
2174 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2175 {
2176         int ret = 0;
2177
2178         mutex_lock(&smu->mutex);
2179
2180         if (smu->ppt_funcs->get_power_profile_mode)
2181                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2182
2183         mutex_unlock(&smu->mutex);
2184
2185         return ret;
2186 }
2187
2188 int smu_set_power_profile_mode(struct smu_context *smu,
2189                                long *param,
2190                                uint32_t param_size,
2191                                bool lock_needed)
2192 {
2193         int ret = 0;
2194
2195         if (lock_needed)
2196                 mutex_lock(&smu->mutex);
2197
2198         if (smu->ppt_funcs->set_power_profile_mode)
2199                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2200
2201         if (lock_needed)
2202                 mutex_unlock(&smu->mutex);
2203
2204         return ret;
2205 }
2206
2207
2208 int smu_get_fan_control_mode(struct smu_context *smu)
2209 {
2210         int ret = 0;
2211
2212         mutex_lock(&smu->mutex);
2213
2214         if (smu->ppt_funcs->get_fan_control_mode)
2215                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2216
2217         mutex_unlock(&smu->mutex);
2218
2219         return ret;
2220 }
2221
2222 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2223 {
2224         int ret = 0;
2225
2226         mutex_lock(&smu->mutex);
2227
2228         if (smu->ppt_funcs->set_fan_control_mode)
2229                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2230
2231         mutex_unlock(&smu->mutex);
2232
2233         return ret;
2234 }
2235
2236 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2237 {
2238         int ret = 0;
2239
2240         mutex_lock(&smu->mutex);
2241
2242         if (smu->ppt_funcs->get_fan_speed_percent)
2243                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2244
2245         mutex_unlock(&smu->mutex);
2246
2247         return ret;
2248 }
2249
2250 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2251 {
2252         int ret = 0;
2253
2254         mutex_lock(&smu->mutex);
2255
2256         if (smu->ppt_funcs->set_fan_speed_percent)
2257                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2258
2259         mutex_unlock(&smu->mutex);
2260
2261         return ret;
2262 }
2263
2264 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2265 {
2266         int ret = 0;
2267
2268         mutex_lock(&smu->mutex);
2269
2270         if (smu->ppt_funcs->get_fan_speed_rpm)
2271                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2272
2273         mutex_unlock(&smu->mutex);
2274
2275         return ret;
2276 }
2277
2278 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2279 {
2280         int ret = 0;
2281
2282         mutex_lock(&smu->mutex);
2283
2284         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2285                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2286
2287         mutex_unlock(&smu->mutex);
2288
2289         return ret;
2290 }
2291
2292 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2293 {
2294         int ret = 0;
2295
2296         if (smu->ppt_funcs->set_active_display_count)
2297                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2298
2299         return ret;
2300 }
2301
2302 int smu_get_clock_by_type(struct smu_context *smu,
2303                           enum amd_pp_clock_type type,
2304                           struct amd_pp_clocks *clocks)
2305 {
2306         int ret = 0;
2307
2308         mutex_lock(&smu->mutex);
2309
2310         if (smu->ppt_funcs->get_clock_by_type)
2311                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2312
2313         mutex_unlock(&smu->mutex);
2314
2315         return ret;
2316 }
2317
2318 int smu_get_max_high_clocks(struct smu_context *smu,
2319                             struct amd_pp_simple_clock_info *clocks)
2320 {
2321         int ret = 0;
2322
2323         mutex_lock(&smu->mutex);
2324
2325         if (smu->ppt_funcs->get_max_high_clocks)
2326                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2327
2328         mutex_unlock(&smu->mutex);
2329
2330         return ret;
2331 }
2332
2333 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2334                                        enum smu_clk_type clk_type,
2335                                        struct pp_clock_levels_with_latency *clocks)
2336 {
2337         int ret = 0;
2338
2339         mutex_lock(&smu->mutex);
2340
2341         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2342                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2343
2344         mutex_unlock(&smu->mutex);
2345
2346         return ret;
2347 }
2348
2349 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2350                                        enum amd_pp_clock_type type,
2351                                        struct pp_clock_levels_with_voltage *clocks)
2352 {
2353         int ret = 0;
2354
2355         mutex_lock(&smu->mutex);
2356
2357         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2358                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2359
2360         mutex_unlock(&smu->mutex);
2361
2362         return ret;
2363 }
2364
2365
2366 int smu_display_clock_voltage_request(struct smu_context *smu,
2367                                       struct pp_display_clock_request *clock_req)
2368 {
2369         int ret = 0;
2370
2371         mutex_lock(&smu->mutex);
2372
2373         if (smu->ppt_funcs->display_clock_voltage_request)
2374                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2375
2376         mutex_unlock(&smu->mutex);
2377
2378         return ret;
2379 }
2380
2381
2382 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2383 {
2384         int ret = -EINVAL;
2385
2386         mutex_lock(&smu->mutex);
2387
2388         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2389                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2390
2391         mutex_unlock(&smu->mutex);
2392
2393         return ret;
2394 }
2395
2396 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2397 {
2398         int ret = 0;
2399
2400         mutex_lock(&smu->mutex);
2401
2402         if (smu->ppt_funcs->notify_smu_enable_pwe)
2403                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2404
2405         mutex_unlock(&smu->mutex);
2406
2407         return ret;
2408 }
2409
2410 int smu_set_xgmi_pstate(struct smu_context *smu,
2411                         uint32_t pstate)
2412 {
2413         int ret = 0;
2414
2415         mutex_lock(&smu->mutex);
2416
2417         if (smu->ppt_funcs->set_xgmi_pstate)
2418                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2419
2420         mutex_unlock(&smu->mutex);
2421
2422         return ret;
2423 }
2424
2425 int smu_set_azalia_d3_pme(struct smu_context *smu)
2426 {
2427         int ret = 0;
2428
2429         mutex_lock(&smu->mutex);
2430
2431         if (smu->ppt_funcs->set_azalia_d3_pme)
2432                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2433
2434         mutex_unlock(&smu->mutex);
2435
2436         return ret;
2437 }
2438
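/*
 * BACO (Bus Active, Chip Off) helpers: smu_baco_is_support() asks the
 * ppt layer whether the ASIC can enter BACO, and smu_baco_enter()/
 * smu_baco_exit() perform the transition, typically around a GPU reset.
 * A hedged sketch:
 *
 *     if (smu_baco_is_support(smu)) {
 *         smu_baco_enter(smu);
 *         ... reset / power down ...
 *         smu_baco_exit(smu);
 *     }
 */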
2439 bool smu_baco_is_support(struct smu_context *smu)
2440 {
2441         bool ret = false;
2442
2443         mutex_lock(&smu->mutex);
2444
2445         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2446                 ret = smu->ppt_funcs->baco_is_support(smu);
2447
2448         mutex_unlock(&smu->mutex);
2449
2450         return ret;
2451 }
2452
2453 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2454 {
2455         if (!smu->ppt_funcs->baco_get_state)
2456                 return -EINVAL;
2457
2458         mutex_lock(&smu->mutex);
2459         *state = smu->ppt_funcs->baco_get_state(smu);
2460         mutex_unlock(&smu->mutex);
2461
2462         return 0;
2463 }
2464
2465 int smu_baco_enter(struct smu_context *smu)
2466 {
2467         int ret = 0;
2468
2469         mutex_lock(&smu->mutex);
2470
2471         if (smu->ppt_funcs->baco_enter)
2472                 ret = smu->ppt_funcs->baco_enter(smu);
2473
2474         mutex_unlock(&smu->mutex);
2475
2476         return ret;
2477 }
2478
2479 int smu_baco_exit(struct smu_context *smu)
2480 {
2481         int ret = 0;
2482
2483         mutex_lock(&smu->mutex);
2484
2485         if (smu->ppt_funcs->baco_exit)
2486                 ret = smu->ppt_funcs->baco_exit(smu);
2487
2488         mutex_unlock(&smu->mutex);
2489
2490         return ret;
2491 }
2492
2493 int smu_mode2_reset(struct smu_context *smu)
2494 {
2495         int ret = 0;
2496
2497         mutex_lock(&smu->mutex);
2498
2499         if (smu->ppt_funcs->mode2_reset)
2500                 ret = smu->ppt_funcs->mode2_reset(smu);
2501
2502         mutex_unlock(&smu->mutex);
2503
2504         return ret;
2505 }
2506
2507 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2508                                          struct pp_smu_nv_clock_table *max_clocks)
2509 {
2510         int ret = 0;
2511
2512         mutex_lock(&smu->mutex);
2513
2514         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2515                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2516
2517         mutex_unlock(&smu->mutex);
2518
2519         return ret;
2520 }
2521
2522 int smu_get_uclk_dpm_states(struct smu_context *smu,
2523                             unsigned int *clock_values_in_khz,
2524                             unsigned int *num_states)
2525 {
2526         int ret = 0;
2527
2528         mutex_lock(&smu->mutex);
2529
2530         if (smu->ppt_funcs->get_uclk_dpm_states)
2531                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2532
2533         mutex_unlock(&smu->mutex);
2534
2535         return ret;
2536 }
2537
2538 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2539 {
2540         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2541
2542         mutex_lock(&smu->mutex);
2543
2544         if (smu->ppt_funcs->get_current_power_state)
2545                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2546
2547         mutex_unlock(&smu->mutex);
2548
2549         return pm_state;
2550 }
2551
2552 int smu_get_dpm_clock_table(struct smu_context *smu,
2553                             struct dpm_clocks *clock_table)
2554 {
2555         int ret = 0;
2556
2557         mutex_lock(&smu->mutex);
2558
2559         if (smu->ppt_funcs->get_dpm_clock_table)
2560                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2561
2562         mutex_unlock(&smu->mutex);
2563
2564         return ret;
2565 }
2566
2567 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2568 {
2569         uint32_t ret = 0;
2570
2571         if (smu->ppt_funcs->get_pptable_power_limit)
2572                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2573
2574         return ret;
2575 }
2576
2577 int smu_send_smc_msg(struct smu_context *smu,
2578                      enum smu_message_type msg)
2579 {
2580         int ret;
2581
2582         ret = smu_send_smc_msg_with_param(smu, msg, 0);
2583         return ret;
2584 }