1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "soc15_common.h"
30 #include "smu_v11_0.h"
31 #include "smu_v12_0.h"
32 #include "atom.h"
33 #include "amd_pcie.h"
34 #include "vega20_ppt.h"
35 #include "arcturus_ppt.h"
36 #include "navi10_ppt.h"
37 #include "renoir_ppt.h"
38
39 #undef __SMU_DUMMY_MAP
40 #define __SMU_DUMMY_MAP(type)   #type
41 static const char* __smu_message_names[] = {
42         SMU_MESSAGE_TYPES
43 };
44
45 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
46 {
47         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
48                 return "unknown smu message";
49         return __smu_message_names[type];
50 }
51
52 #undef __SMU_DUMMY_MAP
53 #define __SMU_DUMMY_MAP(fea)    #fea
54 static const char* __smu_feature_names[] = {
55         SMU_FEATURE_MASKS
56 };
57
58 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
59 {
60         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
61                 return "unknown smu feature";
62         return __smu_feature_names[feature];
63 }
64
65 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
66 {
67         size_t size = 0;
68         int ret = 0, i = 0;
69         uint32_t feature_mask[2] = { 0 };
70         int32_t feature_index = 0;
71         uint32_t count = 0;
72         uint32_t sort_feature[SMU_FEATURE_COUNT];
73         uint64_t hw_feature_count = 0;
74
75         mutex_lock(&smu->mutex);
76
77         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
78         if (ret)
79                 goto failed;
80
81         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
82                         feature_mask[1], feature_mask[0]);
83
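        /* sort the features by their hardware feature index for listing below */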
84         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
85                 feature_index = smu_feature_get_index(smu, i);
86                 if (feature_index < 0)
87                         continue;
88                 sort_feature[feature_index] = i;
89                 hw_feature_count++;
90         }
91
92         for (i = 0; i < hw_feature_count; i++) {
93                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
94                                count++,
95                                smu_get_feature_name(smu, sort_feature[i]),
96                                i,
97                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
98                                "enabled" : "disabled");
99         }
100
101 failed:
102         mutex_unlock(&smu->mutex);
103
104         return size;
105 }
106
107 static int smu_feature_update_enable_state(struct smu_context *smu,
108                                            uint64_t feature_mask,
109                                            bool enabled)
110 {
111         struct smu_feature *feature = &smu->smu_feature;
112         uint32_t feature_low = 0, feature_high = 0;
113         int ret = 0;
114
115         if (!smu->pm_enabled)
116                 return ret;
117
118         feature_low = (feature_mask >> 0) & 0xffffffff;
119         feature_high = (feature_mask >> 32) & 0xffffffff;
120
121         if (enabled) {
122                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
123                                                   feature_low);
124                 if (ret)
125                         return ret;
126                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
127                                                   feature_high);
128                 if (ret)
129                         return ret;
130         } else {
131                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
132                                                   feature_low);
133                 if (ret)
134                         return ret;
135                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
136                                                   feature_high);
137                 if (ret)
138                         return ret;
139         }
140
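        /* keep the cached enabled-feature bitmap in sync with the SMC state */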
141         mutex_lock(&feature->mutex);
142         if (enabled)
143                 bitmap_or(feature->enabled, feature->enabled,
144                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
145         else
146                 bitmap_andnot(feature->enabled, feature->enabled,
147                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
148         mutex_unlock(&feature->mutex);
149
150         return ret;
151 }
152
153 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
154 {
155         int ret = 0;
156         uint32_t feature_mask[2] = { 0 };
157         uint64_t feature_2_enabled = 0;
158         uint64_t feature_2_disabled = 0;
159         uint64_t feature_enables = 0;
160
161         mutex_lock(&smu->mutex);
162
163         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
164         if (ret)
165                 goto out;
166
167         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
168
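        /* work out which features need enabling and which need disabling */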
169         feature_2_enabled  = ~feature_enables & new_mask;
170         feature_2_disabled = feature_enables & ~new_mask;
171
172         if (feature_2_enabled) {
173                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
174                 if (ret)
175                         goto out;
176         }
177         if (feature_2_disabled) {
178                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
179                 if (ret)
180                         goto out;
181         }
182
183 out:
184         mutex_unlock(&smu->mutex);
185
186         return ret;
187 }
188
189 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
190 {
191         int ret = 0;
192
193         if (!if_version && !smu_version)
194                 return -EINVAL;
195
196         if (if_version) {
197                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
198                 if (ret)
199                         return ret;
200
201                 ret = smu_read_smc_arg(smu, if_version);
202                 if (ret)
203                         return ret;
204         }
205
206         if (smu_version) {
207                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
208                 if (ret)
209                         return ret;
210
211                 ret = smu_read_smc_arg(smu, smu_version);
212                 if (ret)
213                         return ret;
214         }
215
216         return ret;
217 }
218
219 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
220                             uint32_t min, uint32_t max)
221 {
222         int ret = 0;
223
224         if (min <= 0 && max <= 0)
225                 return -EINVAL;
226
227         if (!smu_clk_dpm_is_enabled(smu, clk_type))
228                 return 0;
229
230         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
231         return ret;
232 }
233
234 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
235                             uint32_t min, uint32_t max)
236 {
237         int ret = 0, clk_id = 0;
238         uint32_t param;
239
240         if (min <= 0 && max <= 0)
241                 return -EINVAL;
242
243         if (!smu_clk_dpm_is_enabled(smu, clk_type))
244                 return 0;
245
246         clk_id = smu_clk_get_index(smu, clk_type);
247         if (clk_id < 0)
248                 return clk_id;
249
250         if (max > 0) {
251                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
252                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
253                                                   param);
254                 if (ret)
255                         return ret;
256         }
257
258         if (min > 0) {
259                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
260                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
261                                                   param);
262                 if (ret)
263                         return ret;
264         }
265
266
267         return ret;
268 }
269
270 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
271                            uint32_t *min, uint32_t *max, bool lock_needed)
272 {
273         uint32_t clock_limit;
274         int ret = 0;
275
276         if (!min && !max)
277                 return -EINVAL;
278
279         if (lock_needed)
280                 mutex_lock(&smu->mutex);
281
282         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
283                 switch (clk_type) {
284                 case SMU_MCLK:
285                 case SMU_UCLK:
286                         clock_limit = smu->smu_table.boot_values.uclk;
287                         break;
288                 case SMU_GFXCLK:
289                 case SMU_SCLK:
290                         clock_limit = smu->smu_table.boot_values.gfxclk;
291                         break;
292                 case SMU_SOCCLK:
293                         clock_limit = smu->smu_table.boot_values.socclk;
294                         break;
295                 default:
296                         clock_limit = 0;
297                         break;
298                 }
299
300                 /* clock in MHz units */
301                 if (min)
302                         *min = clock_limit / 100;
303                 if (max)
304                         *max = clock_limit / 100;
305         } else {
306                 /*
307                  * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
308                  * core driver and then add helpers for the functionality that is common (SMU_v11_x | SMU_v12_x funcs).
309                  */
310                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
311         }
312
313         if (lock_needed)
314                 mutex_unlock(&smu->mutex);
315
316         return ret;
317 }
318
319 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
320                               uint16_t level, uint32_t *value)
321 {
322         int ret = 0, clk_id = 0;
323         uint32_t param;
324
325         if (!value)
326                 return -EINVAL;
327
328         if (!smu_clk_dpm_is_enabled(smu, clk_type))
329                 return 0;
330
331         clk_id = smu_clk_get_index(smu, clk_type);
332         if (clk_id < 0)
333                 return clk_id;
334
335         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
336
337         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
338                                           param);
339         if (ret)
340                 return ret;
341
342         ret = smu_read_smc_arg(smu, &param);
343         if (ret)
344                 return ret;
345
346         /* BIT31:  0 - Fine grained DPM, 1 - Discrete DPM
347          * not supported for now */
348         *value = param & 0x7fffffff;
349
350         return ret;
351 }
352
353 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
354                             uint32_t *value)
355 {
356         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
357 }
358
359 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
360                             uint32_t *min_value, uint32_t *max_value)
361 {
362         int ret = 0;
363         uint32_t level_count = 0;
364
365         if (!min_value && !max_value)
366                 return -EINVAL;
367
368         if (min_value) {
369                 /* by default, use the level 0 clock value as the min value */
370                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
371                 if (ret)
372                         return ret;
373         }
374
375         if (max_value) {
376                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
377                 if (ret)
378                         return ret;
379
380                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
381                 if (ret)
382                         return ret;
383         }
384
385         return ret;
386 }
387
388 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
389 {
390         enum smu_feature_mask feature_id = 0;
391
392         switch (clk_type) {
393         case SMU_MCLK:
394         case SMU_UCLK:
395                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
396                 break;
397         case SMU_GFXCLK:
398         case SMU_SCLK:
399                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
400                 break;
401         case SMU_SOCCLK:
402                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
403                 break;
404         default:
405                 return true;
406         }
407
408         if (!smu_feature_is_enabled(smu, feature_id)) {
409                 return false;
410         }
411
412         return true;
413 }
414
415 /**
416  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
417  *
418  * @smu:        smu_context pointer
419  * @block_type: the IP block to power gate/ungate
420  * @gate:       to power gate if true, ungate otherwise
421  *
422  * This API uses no smu->mutex lock protection due to:
423  * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce).
424  *    In that case, the caller guarantees it is free of race conditions.
425  * 2. Or it is called on a user request to set power_dpm_force_performance_level.
426  *    In that case, the smu->mutex lock protection is already enforced in
427  *    the parent API smu_force_performance_level of the call path.
428  */
429 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
430                            bool gate)
431 {
432         int ret = 0;
433
434         switch (block_type) {
435         case AMD_IP_BLOCK_TYPE_UVD:
436                 ret = smu_dpm_set_uvd_enable(smu, gate);
437                 break;
438         case AMD_IP_BLOCK_TYPE_VCE:
439                 ret = smu_dpm_set_vce_enable(smu, gate);
440                 break;
441         case AMD_IP_BLOCK_TYPE_GFX:
442                 ret = smu_gfx_off_control(smu, gate);
443                 break;
444         case AMD_IP_BLOCK_TYPE_SDMA:
445                 ret = smu_powergate_sdma(smu, gate);
446                 break;
447         case AMD_IP_BLOCK_TYPE_JPEG:
448                 ret = smu_dpm_set_jpeg_enable(smu, gate);
449                 break;
450         default:
451                 break;
452         }
453
454         return ret;
455 }
456
457 int smu_get_power_num_states(struct smu_context *smu,
458                              struct pp_states_info *state_info)
459 {
460         if (!state_info)
461                 return -EINVAL;
462
463         /* power states are not supported */
464         memset(state_info, 0, sizeof(struct pp_states_info));
465         state_info->nums = 1;
466         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
467
468         return 0;
469 }
470
471 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
472                            void *data, uint32_t *size)
473 {
474         struct smu_power_context *smu_power = &smu->smu_power;
475         struct smu_power_gate *power_gate = &smu_power->power_gate;
476         int ret = 0;
477
478         if (!data || !size)
479                 return -EINVAL;
480
481         switch (sensor) {
482         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
483                 *((uint32_t *)data) = smu->pstate_sclk;
484                 *size = 4;
485                 break;
486         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
487                 *((uint32_t *)data) = smu->pstate_mclk;
488                 *size = 4;
489                 break;
490         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
491                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
492                 *size = 8;
493                 break;
494         case AMDGPU_PP_SENSOR_UVD_POWER:
495                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
496                 *size = 4;
497                 break;
498         case AMDGPU_PP_SENSOR_VCE_POWER:
499                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
500                 *size = 4;
501                 break;
502         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
503                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
504                 *size = 4;
505                 break;
506         default:
507                 ret = -EINVAL;
508                 break;
509         }
510
511         if (ret)
512                 *size = 0;
513
514         return ret;
515 }
516
517 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
518                      void *table_data, bool drv2smu)
519 {
520         struct smu_table_context *smu_table = &smu->smu_table;
521         struct amdgpu_device *adev = smu->adev;
522         struct smu_table *table = NULL;
523         int ret = 0;
524         int table_id = smu_table_get_index(smu, table_index);
525
526         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
527                 return -EINVAL;
528
529         table = &smu_table->tables[table_index];
530
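        /* stage the data in the shared buffer before a driver to SMU transfer */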
531         if (drv2smu)
532                 memcpy(table->cpu_addr, table_data, table->size);
533
534         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
535                                           upper_32_bits(table->mc_address));
536         if (ret)
537                 return ret;
538         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
539                                           lower_32_bits(table->mc_address));
540         if (ret)
541                 return ret;
542         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
543                                           SMU_MSG_TransferTableDram2Smu :
544                                           SMU_MSG_TransferTableSmu2Dram,
545                                           table_id | ((argument & 0xFFFF) << 16));
546         if (ret)
547                 return ret;
548
549         /* flush hdp cache */
550         adev->nbio.funcs->hdp_flush(adev, NULL);
551
552         if (!drv2smu)
553                 memcpy(table_data, table->cpu_addr, table->size);
554
555         return ret;
556 }
557
558 bool is_support_sw_smu(struct amdgpu_device *adev)
559 {
560         if (adev->asic_type == CHIP_VEGA20)
561                 return amdgpu_dpm == 2;
562         else if (adev->asic_type >= CHIP_ARCTURUS) {
563                 if (amdgpu_sriov_vf(adev))
564                         return false;
565                 else
566                         return true;
567         } else
568                 return false;
569 }
570
571 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
572 {
573         if (!is_support_sw_smu(adev))
574                 return false;
575
576         if (adev->asic_type == CHIP_VEGA20)
577                 return true;
578
579         return false;
580 }
581
582 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
583 {
584         struct smu_table_context *smu_table = &smu->smu_table;
585         uint32_t powerplay_table_size;
586
587         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
588                 return -EINVAL;
589
590         mutex_lock(&smu->mutex);
591
592         if (smu_table->hardcode_pptable)
593                 *table = smu_table->hardcode_pptable;
594         else
595                 *table = smu_table->power_play_table;
596
597         powerplay_table_size = smu_table->power_play_table_size;
598
599         mutex_unlock(&smu->mutex);
600
601         return powerplay_table_size;
602 }
603
604 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
605 {
606         struct smu_table_context *smu_table = &smu->smu_table;
607         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
608         int ret = 0;
609
610         if (!smu->pm_enabled)
611                 return -EINVAL;
612         if (header->usStructureSize != size) {
613                 pr_err("pp table size does not match!\n");
614                 return -EIO;
615         }
616
617         mutex_lock(&smu->mutex);
618         if (!smu_table->hardcode_pptable)
619                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
620         if (!smu_table->hardcode_pptable) {
621                 ret = -ENOMEM;
622                 goto failed;
623         }
624
625         memcpy(smu_table->hardcode_pptable, buf, size);
626         smu_table->power_play_table = smu_table->hardcode_pptable;
627         smu_table->power_play_table_size = size;
628
629         /*
630          * A special hw_fini action (for Navi1x, DPM disablement will be
631          * skipped) may be needed for custom pptable uploading.
632          */
633         smu->uploading_custom_pp_table = true;
634
635         ret = smu_reset(smu);
636         if (ret)
637                 pr_info("smu reset failed, ret = %d\n", ret);
638
639         smu->uploading_custom_pp_table = false;
640
641 failed:
642         mutex_unlock(&smu->mutex);
643         return ret;
644 }
645
646 int smu_feature_init_dpm(struct smu_context *smu)
647 {
648         struct smu_feature *feature = &smu->smu_feature;
649         int ret = 0;
650         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
651
652         if (!smu->pm_enabled)
653                 return ret;
654         mutex_lock(&feature->mutex);
655         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
656         mutex_unlock(&feature->mutex);
657
658         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
659                                              SMU_FEATURE_MAX/32);
660         if (ret)
661                 return ret;
662
663         mutex_lock(&feature->mutex);
664         bitmap_or(feature->allowed, feature->allowed,
665                       (unsigned long *)allowed_feature_mask,
666                       feature->feature_num);
667         mutex_unlock(&feature->mutex);
668
669         return ret;
670 }
671
672
673 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
674 {
675         struct smu_feature *feature = &smu->smu_feature;
676         int feature_id;
677         int ret = 0;
678
679         if (smu->is_apu)
680                 return 1;
681
682         feature_id = smu_feature_get_index(smu, mask);
683         if (feature_id < 0)
684                 return 0;
685
686         WARN_ON(feature_id > feature->feature_num);
687
688         mutex_lock(&feature->mutex);
689         ret = test_bit(feature_id, feature->enabled);
690         mutex_unlock(&feature->mutex);
691
692         return ret;
693 }
694
695 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
696                             bool enable)
697 {
698         struct smu_feature *feature = &smu->smu_feature;
699         int feature_id;
700
701         feature_id = smu_feature_get_index(smu, mask);
702         if (feature_id < 0)
703                 return -EINVAL;
704
705         WARN_ON(feature_id > feature->feature_num);
706
707         return smu_feature_update_enable_state(smu,
708                                                1ULL << feature_id,
709                                                enable);
710 }
711
712 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
713 {
714         struct smu_feature *feature = &smu->smu_feature;
715         int feature_id;
716         int ret = 0;
717
718         feature_id = smu_feature_get_index(smu, mask);
719         if (feature_id < 0)
720                 return 0;
721
722         WARN_ON(feature_id > feature->feature_num);
723
724         mutex_lock(&feature->mutex);
725         ret = test_bit(feature_id, feature->supported);
726         mutex_unlock(&feature->mutex);
727
728         return ret;
729 }
730
731 int smu_feature_set_supported(struct smu_context *smu,
732                               enum smu_feature_mask mask,
733                               bool enable)
734 {
735         struct smu_feature *feature = &smu->smu_feature;
736         int feature_id;
737         int ret = 0;
738
739         feature_id = smu_feature_get_index(smu, mask);
740         if (feature_id < 0)
741                 return -EINVAL;
742
743         WARN_ON(feature_id > feature->feature_num);
744
745         mutex_lock(&feature->mutex);
746         if (enable)
747                 test_and_set_bit(feature_id, feature->supported);
748         else
749                 test_and_clear_bit(feature_id, feature->supported);
750         mutex_unlock(&feature->mutex);
751
752         return ret;
753 }
754
755 static int smu_set_funcs(struct amdgpu_device *adev)
756 {
757         struct smu_context *smu = &adev->smu;
758
759         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
760                 smu->od_enabled = true;
761
762         switch (adev->asic_type) {
763         case CHIP_VEGA20:
764                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
765                 vega20_set_ppt_funcs(smu);
766                 break;
767         case CHIP_NAVI10:
768         case CHIP_NAVI14:
769         case CHIP_NAVI12:
770                 navi10_set_ppt_funcs(smu);
771                 break;
772         case CHIP_ARCTURUS:
773                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
774                 arcturus_set_ppt_funcs(smu);
775                 /* OD is not supported on Arcturus */
776                 smu->od_enabled = false;
777                 break;
778         case CHIP_RENOIR:
779                 renoir_set_ppt_funcs(smu);
780                 break;
781         default:
782                 return -EINVAL;
783         }
784
785         return 0;
786 }
787
788 static int smu_early_init(void *handle)
789 {
790         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
791         struct smu_context *smu = &adev->smu;
792
793         smu->adev = adev;
794         smu->pm_enabled = !!amdgpu_dpm;
795         smu->is_apu = false;
796         mutex_init(&smu->mutex);
797
798         return smu_set_funcs(adev);
799 }
800
801 static int smu_late_init(void *handle)
802 {
803         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
804         struct smu_context *smu = &adev->smu;
805
806         if (!smu->pm_enabled)
807                 return 0;
808
809         smu_handle_task(&adev->smu,
810                         smu->smu_dpm.dpm_level,
811                         AMD_PP_TASK_COMPLETE_INIT,
812                         false);
813
814         return 0;
815 }
816
817 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
818                             uint16_t *size, uint8_t *frev, uint8_t *crev,
819                             uint8_t **addr)
820 {
821         struct amdgpu_device *adev = smu->adev;
822         uint16_t data_start;
823
824         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
825                                            size, frev, crev, &data_start))
826                 return -EINVAL;
827
828         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
829
830         return 0;
831 }
832
833 static int smu_initialize_pptable(struct smu_context *smu)
834 {
835         /* TODO */
836         return 0;
837 }
838
839 static int smu_smc_table_sw_init(struct smu_context *smu)
840 {
841         int ret;
842
843         ret = smu_initialize_pptable(smu);
844         if (ret) {
845                 pr_err("Failed to initialize the pptable!\n");
846                 return ret;
847         }
848
849         /**
850          * Create smu_table structure, and init smc tables such as
851          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
852          */
853         ret = smu_init_smc_tables(smu);
854         if (ret) {
855                 pr_err("Failed to init smc tables!\n");
856                 return ret;
857         }
858
859         /**
860          * Create smu_power_context structure, and allocate smu_dpm_context and
861          * context size to fill the smu_power_context data.
862          */
863         ret = smu_init_power(smu);
864         if (ret) {
865                 pr_err("Failed to init smu power!\n");
866                 return ret;
867         }
868
869         return 0;
870 }
871
872 static int smu_smc_table_sw_fini(struct smu_context *smu)
873 {
874         int ret;
875
876         ret = smu_fini_smc_tables(smu);
877         if (ret) {
878                 pr_err("Failed to fini smc tables!\n");
879                 return ret;
880         }
881
882         return 0;
883 }
884
885 static int smu_sw_init(void *handle)
886 {
887         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
888         struct smu_context *smu = &adev->smu;
889         int ret;
890
891         smu->pool_size = adev->pm.smu_prv_buffer_size;
892         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
893         mutex_init(&smu->smu_feature.mutex);
894         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
895         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
896         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
897
898         mutex_init(&smu->smu_baco.mutex);
899         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
900         smu->smu_baco.platform_support = false;
901
902         mutex_init(&smu->sensor_lock);
903         mutex_init(&smu->metrics_lock);
904
905         smu->watermarks_bitmap = 0;
906         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
907         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
908
909         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
910         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
911         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
912         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
913         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
914         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
915         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
916         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
917
918         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
919         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
920         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
921         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
922         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
923         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
924         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
925         smu->display_config = &adev->pm.pm_display_cfg;
926
927         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
928         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
929         ret = smu_init_microcode(smu);
930         if (ret) {
931                 pr_err("Failed to load smu firmware!\n");
932                 return ret;
933         }
934
935         ret = smu_smc_table_sw_init(smu);
936         if (ret) {
937                 pr_err("Failed to sw init smc table!\n");
938                 return ret;
939         }
940
941         ret = smu_register_irq_handler(smu);
942         if (ret) {
943                 pr_err("Failed to register smc irq handler!\n");
944                 return ret;
945         }
946
947         return 0;
948 }
949
950 static int smu_sw_fini(void *handle)
951 {
952         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
953         struct smu_context *smu = &adev->smu;
954         int ret;
955
956         kfree(smu->irq_source);
957         smu->irq_source = NULL;
958
959         ret = smu_smc_table_sw_fini(smu);
960         if (ret) {
961                 pr_err("Failed to sw fini smc table!\n");
962                 return ret;
963         }
964
965         ret = smu_fini_power(smu);
966         if (ret) {
967                 pr_err("Failed to fini smu power!\n");
968                 return ret;
969         }
970
971         return 0;
972 }
973
974 static int smu_init_fb_allocations(struct smu_context *smu)
975 {
976         struct amdgpu_device *adev = smu->adev;
977         struct smu_table_context *smu_table = &smu->smu_table;
978         struct smu_table *tables = smu_table->tables;
979         int ret, i;
980
981         for (i = 0; i < SMU_TABLE_COUNT; i++) {
982                 if (tables[i].size == 0)
983                         continue;
984                 ret = amdgpu_bo_create_kernel(adev,
985                                               tables[i].size,
986                                               tables[i].align,
987                                               tables[i].domain,
988                                               &tables[i].bo,
989                                               &tables[i].mc_address,
990                                               &tables[i].cpu_addr);
991                 if (ret)
992                         goto failed;
993         }
994
995         return 0;
996 failed:
997         while (--i >= 0) {
998                 if (tables[i].size == 0)
999                         continue;
1000                 amdgpu_bo_free_kernel(&tables[i].bo,
1001                                       &tables[i].mc_address,
1002                                       &tables[i].cpu_addr);
1003
1004         }
1005         return ret;
1006 }
1007
1008 static int smu_fini_fb_allocations(struct smu_context *smu)
1009 {
1010         struct smu_table_context *smu_table = &smu->smu_table;
1011         struct smu_table *tables = smu_table->tables;
1012         uint32_t i = 0;
1013
1014         if (!tables)
1015                 return 0;
1016
1017         for (i = 0; i < SMU_TABLE_COUNT; i++) {
1018                 if (tables[i].size == 0)
1019                         continue;
1020                 amdgpu_bo_free_kernel(&tables[i].bo,
1021                                       &tables[i].mc_address,
1022                                       &tables[i].cpu_addr);
1023         }
1024
1025         return 0;
1026 }
1027
1028 static int smu_smc_table_hw_init(struct smu_context *smu,
1029                                  bool initialize)
1030 {
1031         struct amdgpu_device *adev = smu->adev;
1032         int ret;
1033
1034         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1035                 pr_info("dpm has been enabled\n");
1036                 return 0;
1037         }
1038
1039         if (adev->asic_type != CHIP_ARCTURUS) {
1040                 ret = smu_init_display_count(smu, 0);
1041                 if (ret)
1042                         return ret;
1043         }
1044
1045         if (initialize) {
1046                 /* get boot_values from the vbios to set revision, gfxclk, etc. */
1047                 ret = smu_get_vbios_bootup_values(smu);
1048                 if (ret)
1049                         return ret;
1050
1051                 ret = smu_setup_pptable(smu);
1052                 if (ret)
1053                         return ret;
1054
1055                 ret = smu_get_clk_info_from_vbios(smu);
1056                 if (ret)
1057                         return ret;
1058
1059                 /*
1060                  * check that the format_revision in the vbios matches the pptable
1061                  * header version, and that the structure size is not 0.
1062                  */
1063                 ret = smu_check_pptable(smu);
1064                 if (ret)
1065                         return ret;
1066
1067                 /*
1068                  * allocate vram bos to store smc table contents.
1069                  */
1070                 ret = smu_init_fb_allocations(smu);
1071                 if (ret)
1072                         return ret;
1073
1074                 /*
1075                  * Parse pptable format and fill PPTable_t smc_pptable to
1076                  * smu_table_context structure. And read the smc_dpm_table from vbios,
1077                  * then fill it into smc_pptable.
1078                  */
1079                 ret = smu_parse_pptable(smu);
1080                 if (ret)
1081                         return ret;
1082
1083                 /*
1084                  * Send the GetDriverIfVersion message to check that the return value
1085                  * matches the DRIVER_IF_VERSION in the smc header.
1086                  */
1087                 ret = smu_check_fw_version(smu);
1088                 if (ret)
1089                         return ret;
1090         }
1091
1092         /* smu_dump_pptable(smu); */
1093
1094         /*
1095          * Copy pptable bo in the vram to smc with SMU MSGs such as
1096          * SetDriverDramAddr and TransferTableDram2Smu.
1097          */
1098         ret = smu_write_pptable(smu);
1099         if (ret)
1100                 return ret;
1101
1102         /* issue Run*Btc msg */
1103         ret = smu_run_btc(smu);
1104         if (ret)
1105                 return ret;
1106
1107         ret = smu_feature_set_allowed_mask(smu);
1108         if (ret)
1109                 return ret;
1110
1111         ret = smu_system_features_control(smu, true);
1112         if (ret)
1113                 return ret;
1114
1115         if (adev->asic_type != CHIP_ARCTURUS) {
1116                 ret = smu_notify_display_change(smu);
1117                 if (ret)
1118                         return ret;
1119
1120                 /*
1121                  * Set the min deep sleep dcefclk with the bootup value from the vbios via
1122                  * SetMinDeepSleepDcefclk MSG.
1123                  */
1124                 ret = smu_set_min_dcef_deep_sleep(smu);
1125                 if (ret)
1126                         return ret;
1127         }
1128
1129         /*
1130          * Set the initial values (read from the vbios) in the dpm tables context,
1131          * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
1132          * each clock type.
1133          */
1134         if (initialize) {
1135                 ret = smu_populate_smc_tables(smu);
1136                 if (ret)
1137                         return ret;
1138
1139                 ret = smu_init_max_sustainable_clocks(smu);
1140                 if (ret)
1141                         return ret;
1142         }
1143
1144         if (adev->asic_type != CHIP_ARCTURUS) {
1145                 ret = smu_override_pcie_parameters(smu);
1146                 if (ret)
1147                         return ret;
1148         }
1149
1150         ret = smu_set_default_od_settings(smu, initialize);
1151         if (ret)
1152                 return ret;
1153
1154         if (initialize) {
1155                 ret = smu_populate_umd_state_clk(smu);
1156                 if (ret)
1157                         return ret;
1158
1159                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1160                 if (ret)
1161                         return ret;
1162         }
1163
1164         /*
1165          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1166          */
1167         ret = smu_set_tool_table_location(smu);
1168
1169         if (!smu_is_dpm_running(smu))
1170                 pr_info("dpm has been disabled\n");
1171
1172         return ret;
1173 }
1174
1175 /**
1176  * smu_alloc_memory_pool - allocate memory pool in the system memory
1177  *
1178  * @smu: smu_context pointer
1179  *
1180  * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
1181  * and DramLogSetDramAddr messages notify the SMC of its location.
1182  *
1183  * Returns 0 on success, error on failure.
1184  */
1185 static int smu_alloc_memory_pool(struct smu_context *smu)
1186 {
1187         struct amdgpu_device *adev = smu->adev;
1188         struct smu_table_context *smu_table = &smu->smu_table;
1189         struct smu_table *memory_pool = &smu_table->memory_pool;
1190         uint64_t pool_size = smu->pool_size;
1191         int ret = 0;
1192
1193         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1194                 return ret;
1195
1196         memory_pool->size = pool_size;
1197         memory_pool->align = PAGE_SIZE;
1198         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1199
1200         switch (pool_size) {
1201         case SMU_MEMORY_POOL_SIZE_256_MB:
1202         case SMU_MEMORY_POOL_SIZE_512_MB:
1203         case SMU_MEMORY_POOL_SIZE_1_GB:
1204         case SMU_MEMORY_POOL_SIZE_2_GB:
1205                 ret = amdgpu_bo_create_kernel(adev,
1206                                               memory_pool->size,
1207                                               memory_pool->align,
1208                                               memory_pool->domain,
1209                                               &memory_pool->bo,
1210                                               &memory_pool->mc_address,
1211                                               &memory_pool->cpu_addr);
1212                 break;
1213         default:
1214                 break;
1215         }
1216
1217         return ret;
1218 }
1219
1220 static int smu_free_memory_pool(struct smu_context *smu)
1221 {
1222         struct smu_table_context *smu_table = &smu->smu_table;
1223         struct smu_table *memory_pool = &smu_table->memory_pool;
1224
1225         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1226                 return 0;
1227
1228         amdgpu_bo_free_kernel(&memory_pool->bo,
1229                               &memory_pool->mc_address,
1230                               &memory_pool->cpu_addr);
1231
1232         memset(memory_pool, 0, sizeof(struct smu_table));
1233
1234         return 0;
1235 }
1236
1237 static int smu_start_smc_engine(struct smu_context *smu)
1238 {
1239         struct amdgpu_device *adev = smu->adev;
1240         int ret = 0;
1241
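        /* without PSP fw loading, the driver loads the SMU microcode itself */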
1242         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1243                 if (adev->asic_type < CHIP_NAVI10) {
1244                         if (smu->ppt_funcs->load_microcode) {
1245                                 ret = smu->ppt_funcs->load_microcode(smu);
1246                                 if (ret)
1247                                         return ret;
1248                         }
1249                 }
1250         }
1251
1252         if (smu->ppt_funcs->check_fw_status) {
1253                 ret = smu->ppt_funcs->check_fw_status(smu);
1254                 if (ret)
1255                         pr_err("SMC is not ready\n");
1256         }
1257
1258         return ret;
1259 }
1260
1261 static int smu_hw_init(void *handle)
1262 {
1263         int ret;
1264         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1265         struct smu_context *smu = &adev->smu;
1266
1267         ret = smu_start_smc_engine(smu);
1268         if (ret) {
1269                 pr_err("SMU is not ready yet!\n");
1270                 return ret;
1271         }
1272
1273         if (smu->is_apu) {
1274                 smu_powergate_sdma(&adev->smu, false);
1275                 smu_powergate_vcn(&adev->smu, false);
1276                 smu_powergate_jpeg(&adev->smu, false);
1277                 smu_set_gfx_cgpg(&adev->smu, true);
1278         }
1279
1280         if (!smu->pm_enabled)
1281                 return 0;
1282
1283         ret = smu_feature_init_dpm(smu);
1284         if (ret)
1285                 goto failed;
1286
1287         ret = smu_smc_table_hw_init(smu, true);
1288         if (ret)
1289                 goto failed;
1290
1291         ret = smu_alloc_memory_pool(smu);
1292         if (ret)
1293                 goto failed;
1294
1295         /*
1296          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1297          * notify the SMC of the pool location.
1298          */
1299         ret = smu_notify_memory_pool_location(smu);
1300         if (ret)
1301                 goto failed;
1302
1303         ret = smu_start_thermal_control(smu);
1304         if (ret)
1305                 goto failed;
1306
1307         if (!smu->pm_enabled)
1308                 adev->pm.dpm_enabled = false;
1309         else
1310                 adev->pm.dpm_enabled = true;    /* TODO: set the dpm_enabled flag once VCN and DAL DPM are workable */
1311
1312         pr_info("SMU is initialized successfully!\n");
1313
1314         return 0;
1315
1316 failed:
1317         return ret;
1318 }
1319
1320 static int smu_stop_dpms(struct smu_context *smu)
1321 {
1322         return smu_system_features_control(smu, false);
1323 }
1324
1325 static int smu_hw_fini(void *handle)
1326 {
1327         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328         struct smu_context *smu = &adev->smu;
1329         struct smu_table_context *table_context = &smu->smu_table;
1330         int ret = 0;
1331
1332         if (smu->is_apu) {
1333                 smu_powergate_sdma(&adev->smu, true);
1334                 smu_powergate_vcn(&adev->smu, true);
1335                 smu_powergate_jpeg(&adev->smu, true);
1336         }
1337
1338         ret = smu_stop_thermal_control(smu);
1339         if (ret) {
1340                 pr_warn("Failed to stop thermal control!\n");
1341                 return ret;
1342         }
1343
1344         /*
1345          * For custom pptable uploading, skip the DPM features
1346          * disable process on Navi1x ASICs.
1347          *   - The gfx related features are under the control of the
1348          *     RLC on those ASICs. RLC reinitialization would be
1349          *     needed to re-enable them, which would take much more
1350          *     effort.
1351          *
1352          *   - SMU firmware can handle the DPM reenablement
1353          *     properly.
1354          */
1355         if (!smu->uploading_custom_pp_table ||
1356             !((adev->asic_type >= CHIP_NAVI10) &&
1357               (adev->asic_type <= CHIP_NAVI12))) {
1358                 ret = smu_stop_dpms(smu);
1359                 if (ret) {
1360                         pr_warn("Failed to stop DPMs!\n");
1361                         return ret;
1362                 }
1363         }
1364
1365         kfree(table_context->driver_pptable);
1366         table_context->driver_pptable = NULL;
1367
1368         kfree(table_context->max_sustainable_clocks);
1369         table_context->max_sustainable_clocks = NULL;
1370
1371         kfree(table_context->overdrive_table);
1372         table_context->overdrive_table = NULL;
1373
1374         ret = smu_fini_fb_allocations(smu);
1375         if (ret)
1376                 return ret;
1377
1378         ret = smu_free_memory_pool(smu);
1379         if (ret)
1380                 return ret;
1381
1382         return 0;
1383 }
1384
1385 int smu_reset(struct smu_context *smu)
1386 {
1387         struct amdgpu_device *adev = smu->adev;
1388         int ret = 0;
1389
1390         ret = smu_hw_fini(adev);
1391         if (ret)
1392                 return ret;
1393
1394         ret = smu_hw_init(adev);
1395         if (ret)
1396                 return ret;
1397
1398         return ret;
1399 }
1400
1401 static int smu_suspend(void *handle)
1402 {
1403         int ret;
1404         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1405         struct smu_context *smu = &adev->smu;
1406         bool baco_feature_is_enabled = false;
1407
1408         if (!smu->is_apu)
1409                 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1410
1411         ret = smu_system_features_control(smu, false);
1412         if (ret)
1413                 return ret;
1414
1415         if (baco_feature_is_enabled) {
1416                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1417                 if (ret) {
1418                         pr_warn("Failed to enable the BACO feature, ret = %d\n", ret);
1419                         return ret;
1420                 }
1421         }
1422
1423         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1424
1425         if (adev->asic_type >= CHIP_NAVI10 &&
1426             adev->gfx.rlc.funcs->stop)
1427                 adev->gfx.rlc.funcs->stop(adev);
1428         if (smu->is_apu)
1429                 smu_set_gfx_cgpg(&adev->smu, false);
1430
1431         return 0;
1432 }
1433
1434 static int smu_resume(void *handle)
1435 {
1436         int ret;
1437         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1438         struct smu_context *smu = &adev->smu;
1439
1440         pr_info("SMU is resuming...\n");
1441
1442         ret = smu_start_smc_engine(smu);
1443         if (ret) {
1444                 pr_err("SMU is not ready yet!\n");
1445                 goto failed;
1446         }
1447
1448         ret = smu_smc_table_hw_init(smu, false);
1449         if (ret)
1450                 goto failed;
1451
1452         ret = smu_start_thermal_control(smu);
1453         if (ret)
1454                 goto failed;
1455
1456         if (smu->is_apu)
1457                 smu_set_gfx_cgpg(&adev->smu, true);
1458
1459         smu->disable_uclk_switch = 0;
1460
1461         pr_info("SMU is resumed successfully!\n");
1462
1463         return 0;
1464
1465 failed:
1466         return ret;
1467 }
1468
1469 int smu_display_configuration_change(struct smu_context *smu,
1470                                      const struct amd_pp_display_configuration *display_config)
1471 {
1472         int index = 0;
1473         int num_of_active_display = 0;
1474
1475         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1476                 return -EINVAL;
1477
1478         if (!display_config)
1479                 return -EINVAL;
1480
1481         mutex_lock(&smu->mutex);
1482
1483         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1484                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1485                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1486
1487         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1488                 if (display_config->displays[index].controller_id != 0)
1489                         num_of_active_display++;
1490         }
1491
1492         smu_set_active_display_count(smu, num_of_active_display);
1493
1494         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1495                            display_config->cpu_cc6_disable,
1496                            display_config->cpu_pstate_disable,
1497                            display_config->nb_pstate_switch_disable);
1498
1499         mutex_unlock(&smu->mutex);
1500
1501         return 0;
1502 }
1503
1504 static int smu_get_clock_info(struct smu_context *smu,
1505                               struct smu_clock_info *clk_info,
1506                               enum smu_perf_level_designation designation)
1507 {
1508         int ret;
1509         struct smu_performance_level level = {0};
1510
1511         if (!clk_info)
1512                 return -EINVAL;
1513
1514         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1515         if (ret)
1516                 return -EINVAL;
1517
1518         clk_info->min_mem_clk = level.memory_clock;
1519         clk_info->min_eng_clk = level.core_clock;
1520         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1521
1522         ret = smu_get_perf_level(smu, designation, &level);
1523         if (ret)
1524                 return -EINVAL;
1525
1526         clk_info->min_mem_clk = level.memory_clock;
1527         clk_info->min_eng_clk = level.core_clock;
1528         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1529
1530         return 0;
1531 }
1532
1533 int smu_get_current_clocks(struct smu_context *smu,
1534                            struct amd_pp_clock_info *clocks)
1535 {
1536         struct amd_pp_simple_clock_info simple_clocks = {0};
1537         struct smu_clock_info hw_clocks;
1538         int ret = 0;
1539
1540         if (!is_support_sw_smu(smu->adev))
1541                 return -EINVAL;
1542
1543         mutex_lock(&smu->mutex);
1544
1545         smu_get_dal_power_level(smu, &simple_clocks);
1546
1547         if (smu->support_power_containment)
1548                 ret = smu_get_clock_info(smu, &hw_clocks,
1549                                          PERF_LEVEL_POWER_CONTAINMENT);
1550         else
1551                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1552
1553         if (ret) {
1554                 pr_err("Error in smu_get_clock_info\n");
1555                 goto failed;
1556         }
1557
1558         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1559         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1560         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1561         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1562         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1563         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1564         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1565         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1566
1567         if (simple_clocks.level == 0)
1568                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1569         else
1570                 clocks->max_clocks_state = simple_clocks.level;
1571
1572         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1573                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1574                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1575         }
1576
1577 failed:
1578         mutex_unlock(&smu->mutex);
1579         return ret;
1580 }
1581
1582 static int smu_set_clockgating_state(void *handle,
1583                                      enum amd_clockgating_state state)
1584 {
1585         return 0;
1586 }
1587
1588 static int smu_set_powergating_state(void *handle,
1589                                      enum amd_powergating_state state)
1590 {
1591         return 0;
1592 }
1593
1594 static int smu_enable_umd_pstate(void *handle,
1595                       enum amd_dpm_forced_level *level)
1596 {
1597         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1598                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1599                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1600                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1601
1602         struct smu_context *smu = (struct smu_context*)(handle);
1603         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1604
1605         if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1606                 return -EINVAL;
1607
1608         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1609                 /* enter umd pstate, save current level, disable gfx cg */
1610                 if (*level & profile_mode_mask) {
1611                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1612                         smu_dpm_ctx->enable_umd_pstate = true;
1613                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1614                                                                AMD_IP_BLOCK_TYPE_GFX,
1615                                                                AMD_CG_STATE_UNGATE);
1616                         amdgpu_device_ip_set_powergating_state(smu->adev,
1617                                                                AMD_IP_BLOCK_TYPE_GFX,
1618                                                                AMD_PG_STATE_UNGATE);
1619                 }
1620         } else {
1621                 /* exit umd pstate, restore level, enable gfx cg */
1622                 if (!(*level & profile_mode_mask)) {
1623                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1624                                 *level = smu_dpm_ctx->saved_dpm_level;
1625                         smu_dpm_ctx->enable_umd_pstate = false;
1626                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1627                                                                AMD_IP_BLOCK_TYPE_GFX,
1628                                                                AMD_CG_STATE_GATE);
1629                         amdgpu_device_ip_set_powergating_state(smu->adev,
1630                                                                AMD_IP_BLOCK_TYPE_GFX,
1631                                                                AMD_PG_STATE_GATE);
1632                 }
1633         }
1634
1635         return 0;
1636 }
1637
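     /*
      * Re-evaluate the power state: propagate display config changes (unless
      * skipped), apply the ASIC clock-adjust rules, switch to the requested
      * performance level if it changed, and, outside manual mode, apply the
      * highest-priority workload profile currently requested.
      */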
1638 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1639                                    enum amd_dpm_forced_level level,
1640                                    bool skip_display_settings)
1641 {
1642         int ret = 0;
1643         int index = 0;
1644         long workload;
1645         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1646
1647         if (!smu->pm_enabled)
1648                 return -EINVAL;
1649
1650         if (!skip_display_settings) {
1651                 ret = smu_display_config_changed(smu);
1652                 if (ret) {
1653                         pr_err("Failed to change display config!\n");
1654                         return ret;
1655                 }
1656         }
1657
1658         ret = smu_apply_clocks_adjust_rules(smu);
1659         if (ret) {
1660                 pr_err("Failed to apply clocks adjust rules!\n");
1661                 return ret;
1662         }
1663
1664         if (!skip_display_settings) {
1665                 ret = smu_notify_smc_display_config(smu);
1666                 if (ret) {
1667                         pr_err("Failed to notify smc display config!\n");
1668                         return ret;
1669                 }
1670         }
1671
1672         if (smu_dpm_ctx->dpm_level != level) {
1673                 ret = smu_asic_set_performance_level(smu, level);
1674                 if (ret) {
1675                         pr_err("Failed to set performance level!\n");
1676                         return ret;
1677                 }
1678
1679                 /* update the saved copy */
1680                 smu_dpm_ctx->dpm_level = level;
1681         }
1682
1683         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1684                 index = fls(smu->workload_mask);
1685                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1686                 workload = smu->workload_setting[index];
1687
1688                 if (smu->power_profile_mode != workload)
1689                         smu_set_power_profile_mode(smu, &workload, 0, false);
1690         }
1691
1692         return ret;
1693 }
1694
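     /*
      * Dispatch a power-management task.  A display config change also
      * updates the CPU power state before the dynamic readjustment;
      * COMPLETE_INIT and READJUST_POWER_STATE only re-run the adjustment.
      */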
1695 int smu_handle_task(struct smu_context *smu,
1696                     enum amd_dpm_forced_level level,
1697                     enum amd_pp_task task_id,
1698                     bool lock_needed)
1699 {
1700         int ret = 0;
1701
1702         if (lock_needed)
1703                 mutex_lock(&smu->mutex);
1704
1705         switch (task_id) {
1706         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1707                 ret = smu_pre_display_config_changed(smu);
1708                 if (ret)
1709                         goto out;
1710                 ret = smu_set_cpu_power_state(smu);
1711                 if (ret)
1712                         goto out;
1713                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1714                 break;
1715         case AMD_PP_TASK_COMPLETE_INIT:
1716         case AMD_PP_TASK_READJUST_POWER_STATE:
1717                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1718                 break;
1719         default:
1720                 break;
1721         }
1722
1723 out:
1724         if (lock_needed)
1725                 mutex_unlock(&smu->mutex);
1726
1727         return ret;
1728 }
1729
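     /*
      * Enable or disable a workload power profile: set or clear its bit in
      * workload_mask, then, outside manual DPM mode, apply the
      * highest-priority profile that is still requested.
      */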
1730 int smu_switch_power_profile(struct smu_context *smu,
1731                              enum PP_SMC_POWER_PROFILE type,
1732                              bool en)
1733 {
1734         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1735         long workload;
1736         uint32_t index;
1737
1738         if (!smu->pm_enabled)
1739                 return -EINVAL;
1740
1741         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1742                 return -EINVAL;
1743
1744         mutex_lock(&smu->mutex);
1745
1746         if (!en) {
1747                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1748                 index = fls(smu->workload_mask);
1749                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1750                 workload = smu->workload_setting[index];
1751         } else {
1752                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1753                 index = fls(smu->workload_mask);
1754                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1755                 workload = smu->workload_setting[index];
1756         }
1757
1758         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1759                 smu_set_power_profile_mode(smu, &workload, 0, false);
1760
1761         mutex_unlock(&smu->mutex);
1762
1763         return 0;
1764 }
1765
1766 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1767 {
1768         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1769         enum amd_dpm_forced_level level;
1770
1771         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1772                 return -EINVAL;
1773
1774         mutex_lock(&(smu->mutex));
1775         level = smu_dpm_ctx->dpm_level;
1776         mutex_unlock(&(smu->mutex));
1777
1778         return level;
1779 }
1780
1781 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1782 {
1783         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1784         int ret = 0;
1785
1786         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1787                 return -EINVAL;
1788
1789         mutex_lock(&smu->mutex);
1790
1791         ret = smu_enable_umd_pstate(smu, &level);
1792         if (ret) {
1793                 mutex_unlock(&smu->mutex);
1794                 return ret;
1795         }
1796
1797         ret = smu_handle_task(smu, level,
1798                               AMD_PP_TASK_READJUST_POWER_STATE,
1799                               false);
1800
1801         mutex_unlock(&smu->mutex);
1802
1803         return ret;
1804 }
1805
1806 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1807 {
1808         int ret = 0;
1809
1810         mutex_lock(&smu->mutex);
1811         ret = smu_init_display_count(smu, count);
1812         mutex_unlock(&smu->mutex);
1813
1814         return ret;
1815 }
1816
1817 int smu_force_clk_levels(struct smu_context *smu,
1818                          enum smu_clk_type clk_type,
1819                          uint32_t mask,
1820                          bool lock_needed)
1821 {
1822         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1823         int ret = 0;
1824
1825         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1826                 pr_debug("force clock level is for dpm manual mode only.\n");
1827                 return -EINVAL;
1828         }
1829
1830         if (lock_needed)
1831                 mutex_lock(&smu->mutex);
1832
1833         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1834                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1835
1836         if (lock_needed)
1837                 mutex_unlock(&smu->mutex);
1838
1839         return ret;
1840 }
1841
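     /*
      * Tell the MP1/SMU firmware to prepare for a shutdown, driver unload or
      * reset.  States or messages the ASIC does not support are silently
      * ignored.
      */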
1842 int smu_set_mp1_state(struct smu_context *smu,
1843                       enum pp_mp1_state mp1_state)
1844 {
1845         uint16_t msg;
1846         int ret;
1847
1848         /*
1849          * The SMC is not fully ready. That may be
1850          * expected as the IP may be masked.
1851          * So, just return without error.
1852          */
1853         if (!smu->pm_enabled)
1854                 return 0;
1855
1856         mutex_lock(&smu->mutex);
1857
1858         switch (mp1_state) {
1859         case PP_MP1_STATE_SHUTDOWN:
1860                 msg = SMU_MSG_PrepareMp1ForShutdown;
1861                 break;
1862         case PP_MP1_STATE_UNLOAD:
1863                 msg = SMU_MSG_PrepareMp1ForUnload;
1864                 break;
1865         case PP_MP1_STATE_RESET:
1866                 msg = SMU_MSG_PrepareMp1ForReset;
1867                 break;
1868         case PP_MP1_STATE_NONE:
1869         default:
1870                 mutex_unlock(&smu->mutex);
1871                 return 0;
1872         }
1873
1874         /* some asics may not support those messages */
1875         if (smu_msg_get_index(smu, msg) < 0) {
1876                 mutex_unlock(&smu->mutex);
1877                 return 0;
1878         }
1879
1880         ret = smu_send_smc_msg(smu, msg);
1881         if (ret)
1882                 pr_err("[PrepareMp1] Failed!\n");
1883
1884         mutex_unlock(&smu->mutex);
1885
1886         return ret;
1887 }
1888
1889 int smu_set_df_cstate(struct smu_context *smu,
1890                       enum pp_df_cstate state)
1891 {
1892         int ret = 0;
1893
1894         /*
1895          * The SMC is not fully ready. That may be
1896          * expected as the IP may be masked.
1897          * So, just return without error.
1898          */
1899         if (!smu->pm_enabled)
1900                 return 0;
1901
1902         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1903                 return 0;
1904
1905         mutex_lock(&smu->mutex);
1906
1907         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1908         if (ret)
1909                 pr_err("[SetDfCstate] failed!\n");
1910
1911         mutex_unlock(&smu->mutex);
1912
1913         return ret;
1914 }
1915
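     /* Upload the CPU copy of the watermarks table to the SMU. */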
1916 int smu_write_watermarks_table(struct smu_context *smu)
1917 {
1918         int ret = 0;
1919         struct smu_table_context *smu_table = &smu->smu_table;
1920         struct smu_table *table = NULL;
1921
1922         table = &smu_table->tables[SMU_TABLE_WATERMARKS];
1923
1924         if (!table->cpu_addr)
1925                 return -EINVAL;
1926
1927         ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
1928                                 true);
1929
1930         return ret;
1931 }
1932
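     /*
      * Cache the display (DC) watermark ranges into the CPU copy of the
      * watermarks table.  The table is only marked as existing-but-not-loaded
      * here; the upload itself is done separately (see
      * smu_write_watermarks_table() above).
      */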
1933 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1934                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1935 {
1936         struct smu_table *watermarks;
1937         void *table;
1938
1939         if (!smu->smu_table.tables)
1940                 return 0;
1941
1942         watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
1943         table = watermarks->cpu_addr;
1944
1945         mutex_lock(&smu->mutex);
1946
1947         if (!smu->disable_watermark &&
1948                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1949                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1950                 smu_set_watermarks_table(smu, table, clock_ranges);
1951                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1952                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1953         }
1954
1955         mutex_unlock(&smu->mutex);
1956
1957         return 0;
1958 }
1959
1960 const struct amd_ip_funcs smu_ip_funcs = {
1961         .name = "smu",
1962         .early_init = smu_early_init,
1963         .late_init = smu_late_init,
1964         .sw_init = smu_sw_init,
1965         .sw_fini = smu_sw_fini,
1966         .hw_init = smu_hw_init,
1967         .hw_fini = smu_hw_fini,
1968         .suspend = smu_suspend,
1969         .resume = smu_resume,
1970         .is_idle = NULL,
1971         .check_soft_reset = NULL,
1972         .wait_for_idle = NULL,
1973         .soft_reset = NULL,
1974         .set_clockgating_state = smu_set_clockgating_state,
1975         .set_powergating_state = smu_set_powergating_state,
1976         .enable_umd_pstate = smu_enable_umd_pstate,
1977 };
1978
1979 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1980 {
1981         .type = AMD_IP_BLOCK_TYPE_SMC,
1982         .major = 11,
1983         .minor = 0,
1984         .rev = 0,
1985         .funcs = &smu_ip_funcs,
1986 };
1987
1988 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1989 {
1990         .type = AMD_IP_BLOCK_TYPE_SMC,
1991         .major = 12,
1992         .minor = 0,
1993         .rev = 0,
1994         .funcs = &smu_ip_funcs,
1995 };
1996
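     /*
      * Most of the helpers below are thin wrappers around the ASIC-specific
      * ppt_funcs callbacks: they take smu->mutex, invoke the callback when
      * the ASIC implements it, and otherwise return a default value.
      */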
1997 int smu_load_microcode(struct smu_context *smu)
1998 {
1999         int ret = 0;
2000
2001         mutex_lock(&smu->mutex);
2002
2003         if (smu->ppt_funcs->load_microcode)
2004                 ret = smu->ppt_funcs->load_microcode(smu);
2005
2006         mutex_unlock(&smu->mutex);
2007
2008         return ret;
2009 }
2010
2011 int smu_check_fw_status(struct smu_context *smu)
2012 {
2013         int ret = 0;
2014
2015         mutex_lock(&smu->mutex);
2016
2017         if (smu->ppt_funcs->check_fw_status)
2018                 ret = smu->ppt_funcs->check_fw_status(smu);
2019
2020         mutex_unlock(&smu->mutex);
2021
2022         return ret;
2023 }
2024
2025 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2026 {
2027         int ret = 0;
2028
2029         mutex_lock(&smu->mutex);
2030
2031         if (smu->ppt_funcs->set_gfx_cgpg)
2032                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2033
2034         mutex_unlock(&smu->mutex);
2035
2036         return ret;
2037 }
2038
2039 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2040 {
2041         int ret = 0;
2042
2043         mutex_lock(&smu->mutex);
2044
2045         if (smu->ppt_funcs->set_fan_speed_rpm)
2046                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2047
2048         mutex_unlock(&smu->mutex);
2049
2050         return ret;
2051 }
2052
2053 int smu_get_power_limit(struct smu_context *smu,
2054                         uint32_t *limit,
2055                         bool def,
2056                         bool lock_needed)
2057 {
2058         int ret = 0;
2059
2060         if (lock_needed)
2061                 mutex_lock(&smu->mutex);
2062
2063         if (smu->ppt_funcs->get_power_limit)
2064                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2065
2066         if (lock_needed)
2067                 mutex_unlock(&smu->mutex);
2068
2069         return ret;
2070 }
2071
2072 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2073 {
2074         int ret = 0;
2075
2076         mutex_lock(&smu->mutex);
2077
2078         if (smu->ppt_funcs->set_power_limit)
2079                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2080
2081         mutex_unlock(&smu->mutex);
2082
2083         return ret;
2084 }
2085
2086 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2087 {
2088         int ret = 0;
2089
2090         mutex_lock(&smu->mutex);
2091
2092         if (smu->ppt_funcs->print_clk_levels)
2093                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2094
2095         mutex_unlock(&smu->mutex);
2096
2097         return ret;
2098 }
2099
2100 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2101 {
2102         int ret = 0;
2103
2104         mutex_lock(&smu->mutex);
2105
2106         if (smu->ppt_funcs->get_od_percentage)
2107                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2108
2109         mutex_unlock(&smu->mutex);
2110
2111         return ret;
2112 }
2113
2114 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2115 {
2116         int ret = 0;
2117
2118         mutex_lock(&smu->mutex);
2119
2120         if (smu->ppt_funcs->set_od_percentage)
2121                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2122
2123         mutex_unlock(&smu->mutex);
2124
2125         return ret;
2126 }
2127
2128 int smu_od_edit_dpm_table(struct smu_context *smu,
2129                           enum PP_OD_DPM_TABLE_COMMAND type,
2130                           long *input, uint32_t size)
2131 {
2132         int ret = 0;
2133
2134         mutex_lock(&smu->mutex);
2135
2136         if (smu->ppt_funcs->od_edit_dpm_table)
2137                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2138
2139         mutex_unlock(&smu->mutex);
2140
2141         return ret;
2142 }
2143
2144 int smu_read_sensor(struct smu_context *smu,
2145                     enum amd_pp_sensors sensor,
2146                     void *data, uint32_t *size)
2147 {
2148         int ret = 0;
2149
2150         mutex_lock(&smu->mutex);
2151
2152         if (smu->ppt_funcs->read_sensor)
2153                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2154
2155         mutex_unlock(&smu->mutex);
2156
2157         return ret;
2158 }
2159
2160 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2161 {
2162         int ret = 0;
2163
2164         mutex_lock(&smu->mutex);
2165
2166         if (smu->ppt_funcs->get_power_profile_mode)
2167                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2168
2169         mutex_unlock(&smu->mutex);
2170
2171         return ret;
2172 }
2173
2174 int smu_set_power_profile_mode(struct smu_context *smu,
2175                                long *param,
2176                                uint32_t param_size,
2177                                bool lock_needed)
2178 {
2179         int ret = 0;
2180
2181         if (lock_needed)
2182                 mutex_lock(&smu->mutex);
2183
2184         if (smu->ppt_funcs->set_power_profile_mode)
2185                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2186
2187         if (lock_needed)
2188                 mutex_unlock(&smu->mutex);
2189
2190         return ret;
2191 }
2192
2193
2194 int smu_get_fan_control_mode(struct smu_context *smu)
2195 {
2196         int ret = 0;
2197
2198         mutex_lock(&smu->mutex);
2199
2200         if (smu->ppt_funcs->get_fan_control_mode)
2201                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2202
2203         mutex_unlock(&smu->mutex);
2204
2205         return ret;
2206 }
2207
2208 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2209 {
2210         int ret = 0;
2211
2212         mutex_lock(&smu->mutex);
2213
2214         if (smu->ppt_funcs->set_fan_control_mode)
2215                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2216
2217         mutex_unlock(&smu->mutex);
2218
2219         return ret;
2220 }
2221
2222 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2223 {
2224         int ret = 0;
2225
2226         mutex_lock(&smu->mutex);
2227
2228         if (smu->ppt_funcs->get_fan_speed_percent)
2229                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2230
2231         mutex_unlock(&smu->mutex);
2232
2233         return ret;
2234 }
2235
2236 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2237 {
2238         int ret = 0;
2239
2240         mutex_lock(&smu->mutex);
2241
2242         if (smu->ppt_funcs->set_fan_speed_percent)
2243                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2244
2245         mutex_unlock(&smu->mutex);
2246
2247         return ret;
2248 }
2249
2250 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2251 {
2252         int ret = 0;
2253
2254         mutex_lock(&smu->mutex);
2255
2256         if (smu->ppt_funcs->get_fan_speed_rpm)
2257                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2258
2259         mutex_unlock(&smu->mutex);
2260
2261         return ret;
2262 }
2263
2264 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2265 {
2266         int ret = 0;
2267
2268         mutex_lock(&smu->mutex);
2269
2270         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2271                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2272
2273         mutex_unlock(&smu->mutex);
2274
2275         return ret;
2276 }
2277
2278 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2279 {
2280         int ret = 0;
2281
2282         if (smu->ppt_funcs->set_active_display_count)
2283                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2284
2285         return ret;
2286 }
2287
2288 int smu_get_clock_by_type(struct smu_context *smu,
2289                           enum amd_pp_clock_type type,
2290                           struct amd_pp_clocks *clocks)
2291 {
2292         int ret = 0;
2293
2294         mutex_lock(&smu->mutex);
2295
2296         if (smu->ppt_funcs->get_clock_by_type)
2297                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2298
2299         mutex_unlock(&smu->mutex);
2300
2301         return ret;
2302 }
2303
2304 int smu_get_max_high_clocks(struct smu_context *smu,
2305                             struct amd_pp_simple_clock_info *clocks)
2306 {
2307         int ret = 0;
2308
2309         mutex_lock(&smu->mutex);
2310
2311         if (smu->ppt_funcs->get_max_high_clocks)
2312                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2313
2314         mutex_unlock(&smu->mutex);
2315
2316         return ret;
2317 }
2318
2319 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2320                                        enum smu_clk_type clk_type,
2321                                        struct pp_clock_levels_with_latency *clocks)
2322 {
2323         int ret = 0;
2324
2325         mutex_lock(&smu->mutex);
2326
2327         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2328                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2329
2330         mutex_unlock(&smu->mutex);
2331
2332         return ret;
2333 }
2334
2335 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2336                                        enum amd_pp_clock_type type,
2337                                        struct pp_clock_levels_with_voltage *clocks)
2338 {
2339         int ret = 0;
2340
2341         mutex_lock(&smu->mutex);
2342
2343         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2344                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2345
2346         mutex_unlock(&smu->mutex);
2347
2348         return ret;
2349 }
2350
2351
2352 int smu_display_clock_voltage_request(struct smu_context *smu,
2353                                       struct pp_display_clock_request *clock_req)
2354 {
2355         int ret = 0;
2356
2357         mutex_lock(&smu->mutex);
2358
2359         if (smu->ppt_funcs->display_clock_voltage_request)
2360                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2361
2362         mutex_unlock(&smu->mutex);
2363
2364         return ret;
2365 }
2366
2367
2368 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2369 {
2370         int ret = -EINVAL;
2371
2372         mutex_lock(&smu->mutex);
2373
2374         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2375                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2376
2377         mutex_unlock(&smu->mutex);
2378
2379         return ret;
2380 }
2381
2382 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2383 {
2384         int ret = 0;
2385
2386         mutex_lock(&smu->mutex);
2387
2388         if (smu->ppt_funcs->notify_smu_enable_pwe)
2389                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2390
2391         mutex_unlock(&smu->mutex);
2392
2393         return ret;
2394 }
2395
2396 int smu_set_xgmi_pstate(struct smu_context *smu,
2397                         uint32_t pstate)
2398 {
2399         int ret = 0;
2400
2401         mutex_lock(&smu->mutex);
2402
2403         if (smu->ppt_funcs->set_xgmi_pstate)
2404                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2405
2406         mutex_unlock(&smu->mutex);
2407
2408         return ret;
2409 }
2410
2411 int smu_set_azalia_d3_pme(struct smu_context *smu)
2412 {
2413         int ret = 0;
2414
2415         mutex_lock(&smu->mutex);
2416
2417         if (smu->ppt_funcs->set_azalia_d3_pme)
2418                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2419
2420         mutex_unlock(&smu->mutex);
2421
2422         return ret;
2423 }
2424
2425 bool smu_baco_is_support(struct smu_context *smu)
2426 {
2427         bool ret = false;
2428
2429         mutex_lock(&smu->mutex);
2430
2431         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2432                 ret = smu->ppt_funcs->baco_is_support(smu);
2433
2434         mutex_unlock(&smu->mutex);
2435
2436         return ret;
2437 }
2438
2439 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2440 {
2441         if (!smu->ppt_funcs->baco_get_state)
2442                 return -EINVAL;
2443
2444         mutex_lock(&smu->mutex);
2445         *state = smu->ppt_funcs->baco_get_state(smu);
2446         mutex_unlock(&smu->mutex);
2447
2448         return 0;
2449 }
2450
2451 int smu_baco_enter(struct smu_context *smu)
2452 {
2453         int ret = 0;
2454
2455         mutex_lock(&smu->mutex);
2456
2457         if (smu->ppt_funcs->baco_enter)
2458                 ret = smu->ppt_funcs->baco_enter(smu);
2459
2460         mutex_unlock(&smu->mutex);
2461
2462         return ret;
2463 }
2464
2465 int smu_baco_exit(struct smu_context *smu)
2466 {
2467         int ret = 0;
2468
2469         mutex_lock(&smu->mutex);
2470
2471         if (smu->ppt_funcs->baco_exit)
2472                 ret = smu->ppt_funcs->baco_exit(smu);
2473
2474         mutex_unlock(&smu->mutex);
2475
2476         return ret;
2477 }
2478
2479 int smu_mode2_reset(struct smu_context *smu)
2480 {
2481         int ret = 0;
2482
2483         mutex_lock(&smu->mutex);
2484
2485         if (smu->ppt_funcs->mode2_reset)
2486                 ret = smu->ppt_funcs->mode2_reset(smu);
2487
2488         mutex_unlock(&smu->mutex);
2489
2490         return ret;
2491 }
2492
2493 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2494                                          struct pp_smu_nv_clock_table *max_clocks)
2495 {
2496         int ret = 0;
2497
2498         mutex_lock(&smu->mutex);
2499
2500         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2501                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2502
2503         mutex_unlock(&smu->mutex);
2504
2505         return ret;
2506 }
2507
2508 int smu_get_uclk_dpm_states(struct smu_context *smu,
2509                             unsigned int *clock_values_in_khz,
2510                             unsigned int *num_states)
2511 {
2512         int ret = 0;
2513
2514         mutex_lock(&smu->mutex);
2515
2516         if (smu->ppt_funcs->get_uclk_dpm_states)
2517                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2518
2519         mutex_unlock(&smu->mutex);
2520
2521         return ret;
2522 }
2523
2524 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2525 {
2526         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2527
2528         mutex_lock(&smu->mutex);
2529
2530         if (smu->ppt_funcs->get_current_power_state)
2531                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2532
2533         mutex_unlock(&smu->mutex);
2534
2535         return pm_state;
2536 }
2537
2538 int smu_get_dpm_clock_table(struct smu_context *smu,
2539                             struct dpm_clocks *clock_table)
2540 {
2541         int ret = 0;
2542
2543         mutex_lock(&smu->mutex);
2544
2545         if (smu->ppt_funcs->get_dpm_clock_table)
2546                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2547
2548         mutex_unlock(&smu->mutex);
2549
2550         return ret;
2551 }
2552
2553 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2554 {
2555         uint32_t ret = 0;
2556
2557         if (smu->ppt_funcs->get_pptable_power_limit)
2558                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2559
2560         return ret;
2561 }
2562
2563 int smu_send_smc_msg(struct smu_context *smu,
2564                      enum smu_message_type msg)
2565 {
2566         int ret;
2567
2568         ret = smu_send_smc_msg_with_param(smu, msg, 0);
2569         return ret;
2570 }