1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "amd_pcie.h"
33
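/*
 * Build string tables for SMU messages and features: redefining
 * __SMU_DUMMY_MAP() as a stringifier turns the SMU_MESSAGE_TYPES and
 * SMU_FEATURE_MASKS X-macro lists into arrays of printable names.
 */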
34 #undef __SMU_DUMMY_MAP
35 #define __SMU_DUMMY_MAP(type)   #type
36 static const char *__smu_message_names[] = {
37         SMU_MESSAGE_TYPES
38 };
39
40 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
41 {
42         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
43                 return "unknown smu message";
44         return __smu_message_names[type];
45 }
46
47 #undef __SMU_DUMMY_MAP
48 #define __SMU_DUMMY_MAP(fea)    #fea
49 static const char *__smu_feature_names[] = {
50         SMU_FEATURE_MASKS
51 };
52
53 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
54 {
55         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
56                 return "unknown smu feature";
57         return __smu_feature_names[feature];
58 }
59
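/*
 * Print the SMC feature list into @buf, one line per supported feature with
 * its hardware index and enabled/disabled state, and return the number of
 * bytes written.
 */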
60 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
61 {
62         size_t size = 0;
63         int ret = 0, i = 0;
64         uint32_t feature_mask[2] = { 0 };
65         int32_t feature_index = 0;
66         uint32_t count = 0;
67         uint32_t sort_feature[SMU_FEATURE_COUNT];
68         uint64_t hw_feature_count = 0;
69
70         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
71         if (ret)
72                 goto failed;
73
74         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
75                         feature_mask[1], feature_mask[0]);
76
77         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
78                 feature_index = smu_feature_get_index(smu, i);
79                 if (feature_index < 0)
80                         continue;
81                 sort_feature[feature_index] = i;
82                 hw_feature_count++;
83         }
84
85         for (i = 0; i < hw_feature_count; i++) {
86                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
87                                count++,
88                                smu_get_feature_name(smu, sort_feature[i]),
89                                i,
90                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
91                                "enabled" : "disabled");
92         }
93
94 failed:
95         return size;
96 }
97
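/*
 * Enable or disable the features selected by @feature_mask through the
 * {Enable,Disable}SmuFeatures{Low,High} messages, then mirror the change in
 * the driver-side enabled bitmap.
 */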
98 static int smu_feature_update_enable_state(struct smu_context *smu,
99                                            uint64_t feature_mask,
100                                            bool enabled)
101 {
102         struct smu_feature *feature = &smu->smu_feature;
103         uint32_t feature_low = 0, feature_high = 0;
104         int ret = 0;
105
106         if (!smu->pm_enabled)
107                 return ret;
108
109         feature_low = (feature_mask >> 0) & 0xffffffff;
110         feature_high = (feature_mask >> 32) & 0xffffffff;
111
112         if (enabled) {
113                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
114                                                   feature_low);
115                 if (ret)
116                         return ret;
117                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
118                                                   feature_high);
119                 if (ret)
120                         return ret;
121         } else {
122                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
123                                                   feature_low);
124                 if (ret)
125                         return ret;
126                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
127                                                   feature_high);
128                 if (ret)
129                         return ret;
130         }
131
132         mutex_lock(&feature->mutex);
133         if (enabled)
134                 bitmap_or(feature->enabled, feature->enabled,
135                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
136         else
137                 bitmap_andnot(feature->enabled, feature->enabled,
138                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
139         mutex_unlock(&feature->mutex);
140
141         return ret;
142 }
143
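/*
 * Reconcile the currently enabled features with @new_mask: enable the bits
 * that are newly set and disable the bits that were cleared.
 */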
144 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
145 {
146         int ret = 0;
147         uint32_t feature_mask[2] = { 0 };
148         uint64_t feature_2_enabled = 0;
149         uint64_t feature_2_disabled = 0;
150         uint64_t feature_enables = 0;
151
152         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
153         if (ret)
154                 return ret;
155
156         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
157
158         feature_2_enabled  = ~feature_enables & new_mask;
159         feature_2_disabled = feature_enables & ~new_mask;
160
161         if (feature_2_enabled) {
162                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
163                 if (ret)
164                         return ret;
165         }
166         if (feature_2_disabled) {
167                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
168                 if (ret)
169                         return ret;
170         }
171
172         return ret;
173 }
174
175 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
176 {
177         int ret = 0;
178
179         if (!if_version && !smu_version)
180                 return -EINVAL;
181
182         if (if_version) {
183                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
184                 if (ret)
185                         return ret;
186
187                 ret = smu_read_smc_arg(smu, if_version);
188                 if (ret)
189                         return ret;
190         }
191
192         if (smu_version) {
193                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
194                 if (ret)
195                         return ret;
196
197                 ret = smu_read_smc_arg(smu, smu_version);
198                 if (ret)
199                         return ret;
200         }
201
202         return ret;
203 }
204
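/*
 * Soft clock limits are packed as (clk_id << 16) | frequency and sent with
 * SetSoftMaxByFreq/SetSoftMinByFreq; a zero min or max leaves that bound
 * untouched.
 */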
205 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
206                             uint32_t min, uint32_t max)
207 {
208         int ret = 0, clk_id = 0;
209         uint32_t param;
210
211         if (min <= 0 && max <= 0)
212                 return -EINVAL;
213
214         if (!smu_clk_dpm_is_enabled(smu, clk_type))
215                 return 0;
216
217         clk_id = smu_clk_get_index(smu, clk_type);
218         if (clk_id < 0)
219                 return clk_id;
220
221         if (max > 0) {
222                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
223                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
224                                                   param);
225                 if (ret)
226                         return ret;
227         }
228
229         if (min > 0) {
230                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
231                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
232                                                   param);
233                 if (ret)
234                         return ret;
235         }
236
237
238         return ret;
239 }
240
241 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
242                             uint32_t min, uint32_t max)
243 {
244         int ret = 0, clk_id = 0;
245         uint32_t param;
246
247         if (min <= 0 && max <= 0)
248                 return -EINVAL;
249
250         if (!smu_clk_dpm_is_enabled(smu, clk_type))
251                 return 0;
252
253         clk_id = smu_clk_get_index(smu, clk_type);
254         if (clk_id < 0)
255                 return clk_id;
256
257         if (max > 0) {
258                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
259                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
260                                                   param);
261                 if (ret)
262                         return ret;
263         }
264
265         if (min > 0) {
266                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
267                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
268                                                   param);
269                 if (ret)
270                         return ret;
271         }
272
273
274         return ret;
275 }
276
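/*
 * Report the min/max frequency for @clk_type. When DPM is disabled for that
 * clock, the vbios bootup value (converted to MHz) is returned for both
 * bounds.
 */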
277 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
278                            uint32_t *min, uint32_t *max)
279 {
280         uint32_t clock_limit;
281         int ret = 0;
282
283         if (!min && !max)
284                 return -EINVAL;
285
286         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
287                 switch (clk_type) {
288                 case SMU_MCLK:
289                 case SMU_UCLK:
290                         clock_limit = smu->smu_table.boot_values.uclk;
291                         break;
292                 case SMU_GFXCLK:
293                 case SMU_SCLK:
294                         clock_limit = smu->smu_table.boot_values.gfxclk;
295                         break;
296                 case SMU_SOCCLK:
297                         clock_limit = smu->smu_table.boot_values.socclk;
298                         break;
299                 default:
300                         clock_limit = 0;
301                         break;
302                 }
303
304                 /* clock in MHz unit */
305                 if (min)
306                         *min = clock_limit / 100;
307                 if (max)
308                         *max = clock_limit / 100;
309
310                 return 0;
311         }
312         /*
313          * Todo: Have each asic (ASIC_ppt funcs) control the callbacks exposed to the
314          * core driver, and add helpers for what is common (SMU_v11_x | SMU_v12_x funcs).
315          */
316         ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
317         return ret;
318 }
319
320 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
321                               uint16_t level, uint32_t *value)
322 {
323         int ret = 0, clk_id = 0;
324         uint32_t param;
325
326         if (!value)
327                 return -EINVAL;
328
329         if (!smu_clk_dpm_is_enabled(smu, clk_type))
330                 return 0;
331
332         clk_id = smu_clk_get_index(smu, clk_type);
333         if (clk_id < 0)
334                 return clk_id;
335
336         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
337
338         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
339                                           param);
340         if (ret)
341                 return ret;
342
343         ret = smu_read_smc_arg(smu, &param);
344         if (ret)
345                 return ret;
346
347         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
348          * Discrete DPM is not supported for now, so mask it off. */
349         *value = param & 0x7fffffff;
350
351         return ret;
352 }
353
354 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
355                             uint32_t *value)
356 {
357         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
358 }
359
360 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
361 {
362         enum smu_feature_mask feature_id = 0;
363
364         switch (clk_type) {
365         case SMU_MCLK:
366         case SMU_UCLK:
367                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
368                 break;
369         case SMU_GFXCLK:
370         case SMU_SCLK:
371                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
372                 break;
373         case SMU_SOCCLK:
374                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
375                 break;
376         default:
377                 return true;
378         }
379
380         if (!smu_feature_is_enabled(smu, feature_id)) {
381                 return false;
382         }
383
384         return true;
385 }
386
387
388 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
389                            bool gate)
390 {
391         int ret = 0;
392
393         switch (block_type) {
394         case AMD_IP_BLOCK_TYPE_UVD:
395                 ret = smu_dpm_set_uvd_enable(smu, gate);
396                 break;
397         case AMD_IP_BLOCK_TYPE_VCE:
398                 ret = smu_dpm_set_vce_enable(smu, gate);
399                 break;
400         case AMD_IP_BLOCK_TYPE_GFX:
401                 ret = smu_gfx_off_control(smu, gate);
402                 break;
403         default:
404                 break;
405         }
406
407         return ret;
408 }
409
410 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
411 {
412         /* power state is not supported */
413         return POWER_STATE_TYPE_DEFAULT;
414 }
415
416 int smu_get_power_num_states(struct smu_context *smu,
417                              struct pp_states_info *state_info)
418 {
419         if (!state_info)
420                 return -EINVAL;
421
422         /* power state is not supported */
423         memset(state_info, 0, sizeof(struct pp_states_info));
424         state_info->nums = 1;
425         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
426
427         return 0;
428 }
429
430 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
431                            void *data, uint32_t *size)
432 {
433         struct smu_power_context *smu_power = &smu->smu_power;
434         struct smu_power_gate *power_gate = &smu_power->power_gate;
435         int ret = 0;
436
437         if (!data || !size)
438                 return -EINVAL;
439
440         switch (sensor) {
441         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
442                 *((uint32_t *)data) = smu->pstate_sclk;
443                 *size = 4;
444                 break;
445         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
446                 *((uint32_t *)data) = smu->pstate_mclk;
447                 *size = 4;
448                 break;
449         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
450                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
451                 *size = 8;
452                 break;
453         case AMDGPU_PP_SENSOR_UVD_POWER:
454                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
455                 *size = 4;
456                 break;
457         case AMDGPU_PP_SENSOR_VCE_POWER:
458                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
459                 *size = 4;
460                 break;
461         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
462                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
463                 *size = 4;
464                 break;
465         default:
466                 ret = -EINVAL;
467                 break;
468         }
469
470         if (ret)
471                 *size = 0;
472
473         return ret;
474 }
475
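/*
 * Transfer a driver table between driver memory and SMU internal memory:
 * point the SMU at the table's mc address, then issue TransferTableDram2Smu
 * or TransferTableSmu2Dram depending on @drv2smu.
 */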
476 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
477                      void *table_data, bool drv2smu)
478 {
479         struct smu_table_context *smu_table = &smu->smu_table;
480         struct amdgpu_device *adev = smu->adev;
481         struct smu_table *table = NULL;
482         int ret = 0;
483         int table_id = smu_table_get_index(smu, table_index);
484
485         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
486                 return -EINVAL;
487
488         table = &smu_table->tables[table_index];
489
490         if (drv2smu)
491                 memcpy(table->cpu_addr, table_data, table->size);
492
493         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
494                                           upper_32_bits(table->mc_address));
495         if (ret)
496                 return ret;
497         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
498                                           lower_32_bits(table->mc_address));
499         if (ret)
500                 return ret;
501         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
502                                           SMU_MSG_TransferTableDram2Smu :
503                                           SMU_MSG_TransferTableSmu2Dram,
504                                           table_id | ((argument & 0xFFFF) << 16));
505         if (ret)
506                 return ret;
507
508         /* flush hdp cache */
509         adev->nbio.funcs->hdp_flush(adev, NULL);
510
511         if (!drv2smu)
512                 memcpy(table_data, table->cpu_addr, table->size);
513
514         return ret;
515 }
516
517 bool is_support_sw_smu(struct amdgpu_device *adev)
518 {
519         if (adev->asic_type == CHIP_VEGA20)
520                 return (amdgpu_dpm == 2) ? true : false;
521         else if (adev->asic_type >= CHIP_ARCTURUS)
522                 return true;
523         else
524                 return false;
525 }
526
527 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
528 {
529         if (amdgpu_dpm != 1)
530                 return false;
531
532         if (adev->asic_type == CHIP_VEGA20)
533                 return true;
534
535         return false;
536 }
537
538 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
539 {
540         struct smu_table_context *smu_table = &smu->smu_table;
541
542         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
543                 return -EINVAL;
544
545         if (smu_table->hardcode_pptable)
546                 *table = smu_table->hardcode_pptable;
547         else
548                 *table = smu_table->power_play_table;
549
550         return smu_table->power_play_table_size;
551 }
552
553 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
554 {
555         struct smu_table_context *smu_table = &smu->smu_table;
556         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
557         int ret = 0;
558
559         if (!smu->pm_enabled)
560                 return -EINVAL;
561         if (header->usStructureSize != size) {
562                 pr_err("pp table size does not match!\n");
563                 return -EIO;
564         }
565
566         mutex_lock(&smu->mutex);
567         if (!smu_table->hardcode_pptable)
568                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
569         if (!smu_table->hardcode_pptable) {
570                 ret = -ENOMEM;
571                 goto failed;
572         }
573
574         memcpy(smu_table->hardcode_pptable, buf, size);
575         smu_table->power_play_table = smu_table->hardcode_pptable;
576         smu_table->power_play_table_size = size;
577         mutex_unlock(&smu->mutex);
578
579         ret = smu_reset(smu);
580         if (ret)
581                 pr_info("smu reset failed, ret = %d\n", ret);
582
583         return ret;
584
585 failed:
586         mutex_unlock(&smu->mutex);
587         return ret;
588 }
589
590 int smu_feature_init_dpm(struct smu_context *smu)
591 {
592         struct smu_feature *feature = &smu->smu_feature;
593         int ret = 0;
594         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
595
596         if (!smu->pm_enabled)
597                 return ret;
598         mutex_lock(&feature->mutex);
599         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
600         mutex_unlock(&feature->mutex);
601
602         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
603                                              SMU_FEATURE_MAX/32);
604         if (ret)
605                 return ret;
606
607         mutex_lock(&feature->mutex);
608         bitmap_or(feature->allowed, feature->allowed,
609                       (unsigned long *)allowed_feature_mask,
610                       feature->feature_num);
611         mutex_unlock(&feature->mutex);
612
613         return ret;
614 }
615
616
617 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
618 {
619         struct amdgpu_device *adev = smu->adev;
620         struct smu_feature *feature = &smu->smu_feature;
621         int feature_id;
622         int ret = 0;
623
624         if (adev->flags & AMD_IS_APU)
625                 return 1;
626
627         feature_id = smu_feature_get_index(smu, mask);
628         if (feature_id < 0)
629                 return 0;
630
631         WARN_ON(feature_id > feature->feature_num);
632
633         mutex_lock(&feature->mutex);
634         ret = test_bit(feature_id, feature->enabled);
635         mutex_unlock(&feature->mutex);
636
637         return ret;
638 }
639
640 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
641                             bool enable)
642 {
643         struct smu_feature *feature = &smu->smu_feature;
644         int feature_id;
645
646         feature_id = smu_feature_get_index(smu, mask);
647         if (feature_id < 0)
648                 return -EINVAL;
649
650         WARN_ON(feature_id > feature->feature_num);
651
652         return smu_feature_update_enable_state(smu,
653                                                1ULL << feature_id,
654                                                enable);
655 }
656
657 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
658 {
659         struct smu_feature *feature = &smu->smu_feature;
660         int feature_id;
661         int ret = 0;
662
663         feature_id = smu_feature_get_index(smu, mask);
664         if (feature_id < 0)
665                 return 0;
666
667         WARN_ON(feature_id > feature->feature_num);
668
669         mutex_lock(&feature->mutex);
670         ret = test_bit(feature_id, feature->supported);
671         mutex_unlock(&feature->mutex);
672
673         return ret;
674 }
675
676 int smu_feature_set_supported(struct smu_context *smu,
677                               enum smu_feature_mask mask,
678                               bool enable)
679 {
680         struct smu_feature *feature = &smu->smu_feature;
681         int feature_id;
682         int ret = 0;
683
684         feature_id = smu_feature_get_index(smu, mask);
685         if (feature_id < 0)
686                 return -EINVAL;
687
688         WARN_ON(feature_id > feature->feature_num);
689
690         mutex_lock(&feature->mutex);
691         if (enable)
692                 test_and_set_bit(feature_id, feature->supported);
693         else
694                 test_and_clear_bit(feature_id, feature->supported);
695         mutex_unlock(&feature->mutex);
696
697         return ret;
698 }
699
700 static int smu_set_funcs(struct amdgpu_device *adev)
701 {
702         struct smu_context *smu = &adev->smu;
703
704         switch (adev->asic_type) {
705         case CHIP_VEGA20:
706         case CHIP_NAVI10:
707         case CHIP_NAVI14:
708         case CHIP_NAVI12:
709         case CHIP_ARCTURUS:
710                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
711                         smu->od_enabled = true;
712                 smu_v11_0_set_smu_funcs(smu);
713                 break;
714         case CHIP_RENOIR:
715                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
716                         smu->od_enabled = true;
717                 smu_v12_0_set_smu_funcs(smu);
718                 break;
719         default:
720                 return -EINVAL;
721         }
722
723         return 0;
724 }
725
726 static int smu_early_init(void *handle)
727 {
728         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
729         struct smu_context *smu = &adev->smu;
730
731         smu->adev = adev;
732         smu->pm_enabled = !!amdgpu_dpm;
733         mutex_init(&smu->mutex);
734
735         return smu_set_funcs(adev);
736 }
737
738 static int smu_late_init(void *handle)
739 {
740         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
741         struct smu_context *smu = &adev->smu;
742
743         if (!smu->pm_enabled)
744                 return 0;
745
746         mutex_lock(&smu->mutex);
747         smu_handle_task(&adev->smu,
748                         smu->smu_dpm.dpm_level,
749                         AMD_PP_TASK_COMPLETE_INIT);
750         mutex_unlock(&smu->mutex);
751
752         return 0;
753 }
754
755 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
756                             uint16_t *size, uint8_t *frev, uint8_t *crev,
757                             uint8_t **addr)
758 {
759         struct amdgpu_device *adev = smu->adev;
760         uint16_t data_start;
761
762         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
763                                            size, frev, crev, &data_start))
764                 return -EINVAL;
765
766         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
767
768         return 0;
769 }
770
771 static int smu_initialize_pptable(struct smu_context *smu)
772 {
773         /* TODO */
774         return 0;
775 }
776
777 static int smu_smc_table_sw_init(struct smu_context *smu)
778 {
779         int ret;
780
781         ret = smu_initialize_pptable(smu);
782         if (ret) {
783                 pr_err("smu_initialize_pptable failed!\n");
784                 return ret;
785         }
786
787         /**
788          * Create smu_table structure, and init smc tables such as
789          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
790          */
791         ret = smu_init_smc_tables(smu);
792         if (ret) {
793                 pr_err("Failed to init smc tables!\n");
794                 return ret;
795         }
796
797         /**
798          * Create the smu_power_context structure, and allocate the smu_dpm_context
799          * and other data needed to fill smu_power_context.
800          */
801         ret = smu_init_power(smu);
802         if (ret) {
803                 pr_err("smu_init_power failed!\n");
804                 return ret;
805         }
806
807         return 0;
808 }
809
810 static int smu_smc_table_sw_fini(struct smu_context *smu)
811 {
812         int ret;
813
814         ret = smu_fini_smc_tables(smu);
815         if (ret) {
816                 pr_err("smu_fini_smc_tables failed!\n");
817                 return ret;
818         }
819
820         return 0;
821 }
822
823 static int smu_sw_init(void *handle)
824 {
825         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
826         struct smu_context *smu = &adev->smu;
827         int ret;
828
829         smu->pool_size = adev->pm.smu_prv_buffer_size;
830         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
831         mutex_init(&smu->smu_feature.mutex);
832         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
833         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
834         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
835
836         mutex_init(&smu->smu_baco.mutex);
837         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
838         smu->smu_baco.platform_support = false;
839
840         smu->watermarks_bitmap = 0;
841         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
842         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
843
844         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
845         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
846         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
847         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
848         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
849         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
850         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
851         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
852
853         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
854         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
855         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
856         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
857         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
858         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
859         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
860         smu->display_config = &adev->pm.pm_display_cfg;
861
862         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
863         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
864         ret = smu_init_microcode(smu);
865         if (ret) {
866                 pr_err("Failed to load smu firmware!\n");
867                 return ret;
868         }
869
870         ret = smu_smc_table_sw_init(smu);
871         if (ret) {
872                 pr_err("Failed to sw init smc table!\n");
873                 return ret;
874         }
875
876         ret = smu_register_irq_handler(smu);
877         if (ret) {
878                 pr_err("Failed to register smc irq handler!\n");
879                 return ret;
880         }
881
882         return 0;
883 }
884
885 static int smu_sw_fini(void *handle)
886 {
887         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
888         struct smu_context *smu = &adev->smu;
889         int ret;
890
891         kfree(smu->irq_source);
892         smu->irq_source = NULL;
893
894         ret = smu_smc_table_sw_fini(smu);
895         if (ret) {
896                 pr_err("Failed to sw fini smc table!\n");
897                 return ret;
898         }
899
900         ret = smu_fini_power(smu);
901         if (ret) {
902                 pr_err("smu_fini_power failed!\n");
903                 return ret;
904         }
905
906         return 0;
907 }
908
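/* Allocate the BOs backing every non-empty SMC table; undo them all on failure. */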
909 static int smu_init_fb_allocations(struct smu_context *smu)
910 {
911         struct amdgpu_device *adev = smu->adev;
912         struct smu_table_context *smu_table = &smu->smu_table;
913         struct smu_table *tables = smu_table->tables;
914         uint32_t i = 0;
915         int32_t ret = 0;
916
917         for (i = 0; i < SMU_TABLE_COUNT; i++) {
918                 if (tables[i].size == 0)
919                         continue;
920                 ret = amdgpu_bo_create_kernel(adev,
921                                               tables[i].size,
922                                               tables[i].align,
923                                               tables[i].domain,
924                                               &tables[i].bo,
925                                               &tables[i].mc_address,
926                                               &tables[i].cpu_addr);
927                 if (ret)
928                         goto failed;
929         }
930
931         return 0;
932 failed:
933         while (i--) {
934                 if (tables[i].size == 0)
935                         continue;
936                 amdgpu_bo_free_kernel(&tables[i].bo,
937                                       &tables[i].mc_address,
938                                       &tables[i].cpu_addr);
939
940         }
941         return ret;
942 }
943
944 static int smu_fini_fb_allocations(struct smu_context *smu)
945 {
946         struct smu_table_context *smu_table = &smu->smu_table;
947         struct smu_table *tables = smu_table->tables;
948         uint32_t i = 0;
949
950         if (!tables)
951                 return 0;
952
953         for (i = 0; i < SMU_TABLE_COUNT; i++) {
954                 if (tables[i].size == 0)
955                         continue;
956                 amdgpu_bo_free_kernel(&tables[i].bo,
957                                       &tables[i].mc_address,
958                                       &tables[i].cpu_addr);
959         }
960
961         return 0;
962 }
963
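/*
 * Cap the PCIe link parameters reported to the SMU at what the platform
 * supports (link speed and lane width), encoded per the bit layout
 * documented below.
 */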
964 static int smu_override_pcie_parameters(struct smu_context *smu)
965 {
966         struct amdgpu_device *adev = smu->adev;
967         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
968         int ret;
969
970         if (adev->flags & AMD_IS_APU)
971                 return 0;
972
973         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
974                 pcie_gen = 3;
975         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
976                 pcie_gen = 2;
977         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
978                 pcie_gen = 1;
979         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
980                 pcie_gen = 0;
981
982         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
983          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
984          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
985          */
986         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
987                 pcie_width = 6;
988         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
989                 pcie_width = 5;
990         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
991                 pcie_width = 4;
992         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
993                 pcie_width = 3;
994         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
995                 pcie_width = 2;
996         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
997                 pcie_width = 1;
998
999         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
1000         ret = smu_send_smc_msg_with_param(smu,
1001                                           SMU_MSG_OverridePcieParameters,
1002                                           smu_pcie_arg);
1003         if (ret)
1004                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
1005         return ret;
1006 }
1007
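/*
 * Bring up the SMC tables on the hardware: read the vbios bootup values and
 * pptable, upload the pptable to the SMU, enable the allowed features and
 * apply the initial clock/power settings. @initialize distinguishes a first
 * init from a resume.
 */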
1008 static int smu_smc_table_hw_init(struct smu_context *smu,
1009                                  bool initialize)
1010 {
1011         struct amdgpu_device *adev = smu->adev;
1012         int ret;
1013
1014         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1015                 pr_info("dpm has been enabled\n");
1016                 return 0;
1017         }
1018
1019         if (adev->asic_type != CHIP_ARCTURUS) {
1020                 ret = smu_init_display_count(smu, 0);
1021                 if (ret)
1022                         return ret;
1023         }
1024
1025         if (initialize) {
1026                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1027                 ret = smu_get_vbios_bootup_values(smu);
1028                 if (ret)
1029                         return ret;
1030
1031                 ret = smu_setup_pptable(smu);
1032                 if (ret)
1033                         return ret;
1034
1035                 ret = smu_get_clk_info_from_vbios(smu);
1036                 if (ret)
1037                         return ret;
1038
1039                 /*
1040                  * check that the format_revision in vbios is up to the pptable header
1041                  * version, and that the structure size is not 0.
1042                  */
1043                 ret = smu_check_pptable(smu);
1044                 if (ret)
1045                         return ret;
1046
1047                 /*
1048                  * allocate vram bos to store smc table contents.
1049                  */
1050                 ret = smu_init_fb_allocations(smu);
1051                 if (ret)
1052                         return ret;
1053
1054                 /*
1055                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
1056                  * smu_table_context structure. Then read the smc_dpm_table from vbios
1057                  * and fill it into smc_pptable.
1058                  */
1059                 ret = smu_parse_pptable(smu);
1060                 if (ret)
1061                         return ret;
1062
1063                 /*
1064                  * Send the GetDriverIfVersion msg to check that the return value is
1065                  * equal to the DRIVER_IF_VERSION in the smc header.
1066                  */
1067                 ret = smu_check_fw_version(smu);
1068                 if (ret)
1069                         return ret;
1070         }
1071
1072         /* smu_dump_pptable(smu); */
1073
1074         /*
1075          * Copy pptable bo in the vram to smc with SMU MSGs such as
1076          * SetDriverDramAddr and TransferTableDram2Smu.
1077          */
1078         ret = smu_write_pptable(smu);
1079         if (ret)
1080                 return ret;
1081
1082         /* issue Run*Btc msg */
1083         ret = smu_run_btc(smu);
1084         if (ret)
1085                 return ret;
1086
1087         ret = smu_feature_set_allowed_mask(smu);
1088         if (ret)
1089                 return ret;
1090
1091         ret = smu_system_features_control(smu, true);
1092         if (ret)
1093                 return ret;
1094
1095         if (adev->asic_type != CHIP_ARCTURUS) {
1096                 ret = smu_override_pcie_parameters(smu);
1097                 if (ret)
1098                         return ret;
1099
1100                 ret = smu_notify_display_change(smu);
1101                 if (ret)
1102                         return ret;
1103
1104                 /*
1105                  * Set min deep sleep dcefclk with the bootup value from vbios via
1106                  * SetMinDeepSleepDcefclk MSG.
1107                  */
1108                 ret = smu_set_min_dcef_deep_sleep(smu);
1109                 if (ret)
1110                         return ret;
1111         }
1112
1113         /*
1114          * Set initial values (obtained from vbios) in the dpm tables context, such
1115          * as gfxclk, memclk and dcefclk, and enable the DPM feature for each clock
1116          * type.
1117          */
1118         if (initialize) {
1119                 ret = smu_populate_smc_tables(smu);
1120                 if (ret)
1121                         return ret;
1122
1123                 ret = smu_init_max_sustainable_clocks(smu);
1124                 if (ret)
1125                         return ret;
1126         }
1127
1128         ret = smu_set_default_od_settings(smu, initialize);
1129         if (ret)
1130                 return ret;
1131
1132         if (initialize) {
1133                 ret = smu_populate_umd_state_clk(smu);
1134                 if (ret)
1135                         return ret;
1136
1137                 ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
1138                 if (ret)
1139                         return ret;
1140         }
1141
1142         /*
1143          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1144          */
1145         ret = smu_set_tool_table_location(smu);
1146
1147         if (!smu_is_dpm_running(smu))
1148                 pr_info("dpm has been disabled\n");
1149
1150         return ret;
1151 }
1152
1153 /**
1154  * smu_alloc_memory_pool - allocate memory pool in the system memory
1155  *
1156  * @smu: smu_context pointer
1157  *
1158  * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
1159  * DramLogSetDramAddr msgs notify the SMC of its location.
1160  *
1161  * Returns 0 on success, error on failure.
1162  */
1163 static int smu_alloc_memory_pool(struct smu_context *smu)
1164 {
1165         struct amdgpu_device *adev = smu->adev;
1166         struct smu_table_context *smu_table = &smu->smu_table;
1167         struct smu_table *memory_pool = &smu_table->memory_pool;
1168         uint64_t pool_size = smu->pool_size;
1169         int ret = 0;
1170
1171         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1172                 return ret;
1173
1174         memory_pool->size = pool_size;
1175         memory_pool->align = PAGE_SIZE;
1176         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1177
1178         switch (pool_size) {
1179         case SMU_MEMORY_POOL_SIZE_256_MB:
1180         case SMU_MEMORY_POOL_SIZE_512_MB:
1181         case SMU_MEMORY_POOL_SIZE_1_GB:
1182         case SMU_MEMORY_POOL_SIZE_2_GB:
1183                 ret = amdgpu_bo_create_kernel(adev,
1184                                               memory_pool->size,
1185                                               memory_pool->align,
1186                                               memory_pool->domain,
1187                                               &memory_pool->bo,
1188                                               &memory_pool->mc_address,
1189                                               &memory_pool->cpu_addr);
1190                 break;
1191         default:
1192                 break;
1193         }
1194
1195         return ret;
1196 }
1197
1198 static int smu_free_memory_pool(struct smu_context *smu)
1199 {
1200         struct smu_table_context *smu_table = &smu->smu_table;
1201         struct smu_table *memory_pool = &smu_table->memory_pool;
1202         int ret = 0;
1203
1204         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1205                 return ret;
1206
1207         amdgpu_bo_free_kernel(&memory_pool->bo,
1208                               &memory_pool->mc_address,
1209                               &memory_pool->cpu_addr);
1210
1211         memset(memory_pool, 0, sizeof(struct smu_table));
1212
1213         return ret;
1214 }
1215
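/*
 * hw init: load/validate the SMC firmware, power up SDMA/VCN on APUs, then
 * initialize the DPM features, SMC tables and memory pool, and start
 * thermal control.
 */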
1216 static int smu_hw_init(void *handle)
1217 {
1218         int ret;
1219         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1220         struct smu_context *smu = &adev->smu;
1221
1222         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1223                 if (adev->asic_type < CHIP_NAVI10) {
1224                         ret = smu_load_microcode(smu);
1225                         if (ret)
1226                                 return ret;
1227                 }
1228         }
1229
1230         ret = smu_check_fw_status(smu);
1231         if (ret) {
1232                 pr_err("SMC firmware status is not correct\n");
1233                 return ret;
1234         }
1235
1236         if (adev->flags & AMD_IS_APU) {
1237                 smu_powergate_sdma(&adev->smu, false);
1238                 smu_powergate_vcn(&adev->smu, false);
1239         }
1240
1241         if (!smu->pm_enabled)
1242                 return 0;
1243
1244         ret = smu_feature_init_dpm(smu);
1245         if (ret)
1246                 goto failed;
1247
1248         ret = smu_smc_table_hw_init(smu, true);
1249         if (ret)
1250                 goto failed;
1251
1252         ret = smu_alloc_memory_pool(smu);
1253         if (ret)
1254                 goto failed;
1255
1256         /*
1257          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to notify
1258          * the SMC of the pool location.
1259          */
1260         ret = smu_notify_memory_pool_location(smu);
1261         if (ret)
1262                 goto failed;
1263
1264         ret = smu_start_thermal_control(smu);
1265         if (ret)
1266                 goto failed;
1267
1268         if (!smu->pm_enabled)
1269                 adev->pm.dpm_enabled = false;
1270         else
1271                 adev->pm.dpm_enabled = true;    /* TODO: set the dpm_enabled flag once VCN and DAL DPM are workable */
1272
1273         pr_info("SMU is initialized successfully!\n");
1274
1275         return 0;
1276
1277 failed:
1278         return ret;
1279 }
1280
1281 static int smu_stop_dpms(struct smu_context *smu)
1282 {
1283         return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
1284 }
1285
1286 static int smu_hw_fini(void *handle)
1287 {
1288         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1289         struct smu_context *smu = &adev->smu;
1290         struct smu_table_context *table_context = &smu->smu_table;
1291         int ret = 0;
1292
1293         if (adev->flags & AMD_IS_APU) {
1294                 smu_powergate_sdma(&adev->smu, true);
1295                 smu_powergate_vcn(&adev->smu, true);
1296         }
1297
1298         ret = smu_stop_thermal_control(smu);
1299         if (ret) {
1300                 pr_warn("Failed to stop thermal control!\n");
1301                 return ret;
1302         }
1303
1304         ret = smu_stop_dpms(smu);
1305         if (ret) {
1306                 pr_warn("Failed to stop DPMs!\n");
1307                 return ret;
1308         }
1309
1310         kfree(table_context->driver_pptable);
1311         table_context->driver_pptable = NULL;
1312
1313         kfree(table_context->max_sustainable_clocks);
1314         table_context->max_sustainable_clocks = NULL;
1315
1316         kfree(table_context->overdrive_table);
1317         table_context->overdrive_table = NULL;
1318
1319         ret = smu_fini_fb_allocations(smu);
1320         if (ret)
1321                 return ret;
1322
1323         ret = smu_free_memory_pool(smu);
1324         if (ret)
1325                 return ret;
1326
1327         return 0;
1328 }
1329
1330 int smu_reset(struct smu_context *smu)
1331 {
1332         struct amdgpu_device *adev = smu->adev;
1333         int ret = 0;
1334
1335         ret = smu_hw_fini(adev);
1336         if (ret)
1337                 return ret;
1338
1339         ret = smu_hw_init(adev);
1340         if (ret)
1341                 return ret;
1342
1343         return ret;
1344 }
1345
1346 static int smu_suspend(void *handle)
1347 {
1348         int ret;
1349         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1350         struct smu_context *smu = &adev->smu;
1351         bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1352
1353         ret = smu_system_features_control(smu, false);
1354         if (ret)
1355                 return ret;
1356
1357         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1358                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1359                 if (ret) {
1360                         pr_warn("Failed to enable the BACO feature, return %d\n", ret);
1361                         return ret;
1362                 }
1363         }
1364
1365         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1366
1367         if (adev->asic_type >= CHIP_NAVI10 &&
1368             adev->gfx.rlc.funcs->stop)
1369                 adev->gfx.rlc.funcs->stop(adev);
1370
1371         return 0;
1372 }
1373
1374 static int smu_resume(void *handle)
1375 {
1376         int ret;
1377         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1378         struct smu_context *smu = &adev->smu;
1379
1380         pr_info("SMU is resuming...\n");
1381
1382         mutex_lock(&smu->mutex);
1383
1384         ret = smu_smc_table_hw_init(smu, false);
1385         if (ret)
1386                 goto failed;
1387
1388         ret = smu_start_thermal_control(smu);
1389         if (ret)
1390                 goto failed;
1391
1392         mutex_unlock(&smu->mutex);
1393
1394         pr_info("SMU is resumed successfully!\n");
1395
1396         return 0;
1397 failed:
1398         mutex_unlock(&smu->mutex);
1399         return ret;
1400 }
1401
1402 int smu_display_configuration_change(struct smu_context *smu,
1403                                      const struct amd_pp_display_configuration *display_config)
1404 {
1405         int index = 0;
1406         int num_of_active_display = 0;
1407
1408         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1409                 return -EINVAL;
1410
1411         if (!display_config)
1412                 return -EINVAL;
1413
1414         mutex_lock(&smu->mutex);
1415
1416         smu_set_deep_sleep_dcefclk(smu,
1417                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1418
1419         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1420                 if (display_config->displays[index].controller_id != 0)
1421                         num_of_active_display++;
1422         }
1423
1424         smu_set_active_display_count(smu, num_of_active_display);
1425
1426         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1427                            display_config->cpu_cc6_disable,
1428                            display_config->cpu_pstate_disable,
1429                            display_config->nb_pstate_switch_disable);
1430
1431         mutex_unlock(&smu->mutex);
1432
1433         return 0;
1434 }
1435
1436 static int smu_get_clock_info(struct smu_context *smu,
1437                               struct smu_clock_info *clk_info,
1438                               enum smu_perf_level_designation designation)
1439 {
1440         int ret;
1441         struct smu_performance_level level = {0};
1442
1443         if (!clk_info)
1444                 return -EINVAL;
1445
1446         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1447         if (ret)
1448                 return -EINVAL;
1449
1450         clk_info->min_mem_clk = level.memory_clock;
1451         clk_info->min_eng_clk = level.core_clock;
1452         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1453
1454         ret = smu_get_perf_level(smu, designation, &level);
1455         if (ret)
1456                 return -EINVAL;
1457
1458         clk_info->min_mem_clk = level.memory_clock;
1459         clk_info->min_eng_clk = level.core_clock;
1460         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1461
1462         return 0;
1463 }
1464
1465 int smu_get_current_clocks(struct smu_context *smu,
1466                            struct amd_pp_clock_info *clocks)
1467 {
1468         struct amd_pp_simple_clock_info simple_clocks = {0};
1469         struct smu_clock_info hw_clocks;
1470         int ret = 0;
1471
1472         if (!is_support_sw_smu(smu->adev))
1473                 return -EINVAL;
1474
1475         mutex_lock(&smu->mutex);
1476
1477         smu_get_dal_power_level(smu, &simple_clocks);
1478
1479         if (smu->support_power_containment)
1480                 ret = smu_get_clock_info(smu, &hw_clocks,
1481                                          PERF_LEVEL_POWER_CONTAINMENT);
1482         else
1483                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1484
1485         if (ret) {
1486                 pr_err("Error in smu_get_clock_info\n");
1487                 goto failed;
1488         }
1489
1490         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1491         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1492         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1493         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1494         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1495         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1496         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1497         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1498
1499         if (simple_clocks.level == 0)
1500                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1501         else
1502                 clocks->max_clocks_state = simple_clocks.level;
1503
1504         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1505                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1506                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1507         }
1508
1509 failed:
1510         mutex_unlock(&smu->mutex);
1511         return ret;
1512 }
1513
1514 static int smu_set_clockgating_state(void *handle,
1515                                      enum amd_clockgating_state state)
1516 {
1517         return 0;
1518 }
1519
1520 static int smu_set_powergating_state(void *handle,
1521                                      enum amd_powergating_state state)
1522 {
1523         return 0;
1524 }
1525
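/*
 * Entering a profiling (UMD pstate) level saves the current level and
 * ungates GFX clock/power gating; leaving it restores gating and, on
 * PROFILE_EXIT, the saved level.
 */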
1526 static int smu_enable_umd_pstate(void *handle,
1527                       enum amd_dpm_forced_level *level)
1528 {
1529         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1530                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1531                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1532                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1533
1534         struct smu_context *smu = (struct smu_context *)handle;
1535         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1536         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1537                 return -EINVAL;
1538
1539         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1540                 /* enter umd pstate, save current level, disable gfx cg */
1541                 if (*level & profile_mode_mask) {
1542                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1543                         smu_dpm_ctx->enable_umd_pstate = true;
1544                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1545                                                                AMD_IP_BLOCK_TYPE_GFX,
1546                                                                AMD_CG_STATE_UNGATE);
1547                         amdgpu_device_ip_set_powergating_state(smu->adev,
1548                                                                AMD_IP_BLOCK_TYPE_GFX,
1549                                                                AMD_PG_STATE_UNGATE);
1550                 }
1551         } else {
1552                 /* exit umd pstate, restore level, enable gfx cg */
1553                 if (!(*level & profile_mode_mask)) {
1554                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1555                                 *level = smu_dpm_ctx->saved_dpm_level;
1556                         smu_dpm_ctx->enable_umd_pstate = false;
1557                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1558                                                                AMD_IP_BLOCK_TYPE_GFX,
1559                                                                AMD_CG_STATE_GATE);
1560                         amdgpu_device_ip_set_powergating_state(smu->adev,
1561                                                                AMD_IP_BLOCK_TYPE_GFX,
1562                                                                AMD_PG_STATE_GATE);
1563                 }
1564         }
1565
1566         return 0;
1567 }
1568
1569 static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1570 {
1571         int ret = 0;
1572         uint32_t sclk_mask, mclk_mask, soc_mask;
1573
1574         switch (level) {
1575         case AMD_DPM_FORCED_LEVEL_HIGH:
1576                 ret = smu_force_dpm_limit_value(smu, true);
1577                 break;
1578         case AMD_DPM_FORCED_LEVEL_LOW:
1579                 ret = smu_force_dpm_limit_value(smu, false);
1580                 break;
1581         case AMD_DPM_FORCED_LEVEL_AUTO:
1582         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1583                 ret = smu_unforce_dpm_levels(smu);
1584                 break;
1585         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1586         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1587         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1588                 ret = smu_get_profiling_clk_mask(smu, level,
1589                                                  &sclk_mask,
1590                                                  &mclk_mask,
1591                                                  &soc_mask);
1592                 if (ret)
1593                         return ret;
1594                 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
1595                 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
1596                 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1597                 break;
1598         case AMD_DPM_FORCED_LEVEL_MANUAL:
1599         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1600         default:
1601                 break;
1602         }
1603         return ret;
1604 }
1605
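/*
 * Re-evaluate the dynamic power state: optionally push display-config
 * changes to the SMC, re-apply the clock adjust rules, switch the
 * performance level if it differs from the cached one (falling back to
 * smu_default_set_performance_level() when the ASIC handler fails), and,
 * unless the level is MANUAL, apply the workload power profile selected
 * by the highest bit set in workload_mask.
 */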
1606 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1607                                    enum amd_dpm_forced_level level,
1608                                    bool skip_display_settings)
1609 {
1610         int ret = 0;
1611         int index = 0;
1612         long workload;
1613         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1614
1615         if (!smu->pm_enabled)
1616                 return -EINVAL;
1617
1618         if (!skip_display_settings) {
1619                 ret = smu_display_config_changed(smu);
1620                 if (ret) {
1621                         pr_err("Failed to change display config!\n");
1622                         return ret;
1623                 }
1624         }
1625
1626         ret = smu_apply_clocks_adjust_rules(smu);
1627         if (ret) {
1628                 pr_err("Failed to apply clocks adjust rules!\n");
1629                 return ret;
1630         }
1631
1632         if (!skip_display_settings) {
1633                 ret = smu_notify_smc_dispaly_config(smu);
1634                 if (ret) {
1635                         pr_err("Failed to notify smc display config!\n");
1636                         return ret;
1637                 }
1638         }
1639
1640         if (smu_dpm_ctx->dpm_level != level) {
1641                 ret = smu_asic_set_performance_level(smu, level);
1642                 if (ret) {
1643                         ret = smu_default_set_performance_level(smu, level);
1644                         if (ret) {
1645                                 pr_err("Failed to set performance level!\n");
1646                                 return ret;
1647                         }
1648                 }
1649
1650                 /* update the saved copy */
1651                 smu_dpm_ctx->dpm_level = level;
1652         }
1653
1654         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1655                 index = fls(smu->workload_mask);
1656                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1657                 workload = smu->workload_setting[index];
1658
1659                 if (smu->power_profile_mode != workload)
1660                         smu_set_power_profile_mode(smu, &workload, 0);
1661         }
1662
1663         return ret;
1664 }
1665
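/*
 * Dispatch a powerplay task.  A display-config change runs the pre-change
 * hook and CPU power-state update before a full readjustment; COMPLETE_INIT
 * and READJUST_POWER_STATE only re-run the dynamic adjustment with the
 * display settings skipped.
 */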
1666 int smu_handle_task(struct smu_context *smu,
1667                     enum amd_dpm_forced_level level,
1668                     enum amd_pp_task task_id)
1669 {
1670         int ret = 0;
1671
1672         switch (task_id) {
1673         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1674                 ret = smu_pre_display_config_changed(smu);
1675                 if (ret)
1676                         return ret;
1677                 ret = smu_set_cpu_power_state(smu);
1678                 if (ret)
1679                         return ret;
1680                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1681                 break;
1682         case AMD_PP_TASK_COMPLETE_INIT:
1683         case AMD_PP_TASK_READJUST_POWER_STATE:
1684                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1685                 break;
1686         default:
1687                 break;
1688         }
1689
1690         return ret;
1691 }
1692
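/*
 * Set or clear one power-profile bit in workload_mask (indexed through
 * workload_prority[]) and recompute the active workload setting from the
 * highest remaining bit.  The new profile mode is only sent to the SMU
 * when the current DPM level is not MANUAL.
 */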
1693 int smu_switch_power_profile(struct smu_context *smu,
1694                              enum PP_SMC_POWER_PROFILE type,
1695                              bool en)
1696 {
1697         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1698         long workload;
1699         uint32_t index;
1700
1701         if (!smu->pm_enabled)
1702                 return -EINVAL;
1703
1704         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1705                 return -EINVAL;
1706
1707         mutex_lock(&smu->mutex);
1708
1709         if (!en) {
1710                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1711                 index = fls(smu->workload_mask);
1712                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1713                 workload = smu->workload_setting[index];
1714         } else {
1715                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1716                 index = fls(smu->workload_mask);
1717                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1718                 workload = smu->workload_setting[index];
1719         }
1720
1721         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1722                 smu_set_power_profile_mode(smu, &workload, 0);
1723
1724         mutex_unlock(&smu->mutex);
1725
1726         return 0;
1727 }
1728
1729 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1730 {
1731         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1732         enum amd_dpm_forced_level level;
1733
1734         if (!smu_dpm_ctx->dpm_context)
1735                 return -EINVAL;
1736
1737         mutex_lock(&smu->mutex);
1738         level = smu_dpm_ctx->dpm_level;
1739         mutex_unlock(&smu->mutex);
1740
1741         return level;
1742 }
1743
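/*
 * Change the forced DPM performance level: handle the UMD-pstate
 * enter/exit transition first, then readjust the power state through
 * smu_handle_task(AMD_PP_TASK_READJUST_POWER_STATE).
 */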
1744 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1745 {
1746         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1747         int ret = 0;
1748
1749         if (!smu_dpm_ctx->dpm_context)
1750                 return -EINVAL;
1751
1752         ret = smu_enable_umd_pstate(smu, &level);
1753         if (ret)
1754                 return ret;
1755
1756         ret = smu_handle_task(smu, level,
1757                               AMD_PP_TASK_READJUST_POWER_STATE);
1758
1759         return ret;
1760 }
1761
1762 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1763 {
1764         int ret = 0;
1765
1766         mutex_lock(&smu->mutex);
1767         ret = smu_init_display_count(smu, count);
1768         mutex_unlock(&smu->mutex);
1769
1770         return ret;
1771 }
1772
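/*
 * Force a bitmask of allowed levels for a single clock domain.  Only
 * honoured while the DPM level is MANUAL; otherwise -EINVAL is returned.
 */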
1773 int smu_force_clk_levels(struct smu_context *smu,
1774                          enum smu_clk_type clk_type,
1775                          uint32_t mask)
1776 {
1777         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1778         int ret = 0;
1779
1780         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1781                 pr_debug("force clock level is for dpm manual mode only.\n");
1782                 return -EINVAL;
1783         }
1784
1785         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1786                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1787
1788         return ret;
1789 }
1790
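/*
 * amdgpu IP-block glue: the same smu_ip_funcs callback table backs both
 * the SMU v11.0 and v12.0 IP block versions defined below.
 */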
1791 const struct amd_ip_funcs smu_ip_funcs = {
1792         .name = "smu",
1793         .early_init = smu_early_init,
1794         .late_init = smu_late_init,
1795         .sw_init = smu_sw_init,
1796         .sw_fini = smu_sw_fini,
1797         .hw_init = smu_hw_init,
1798         .hw_fini = smu_hw_fini,
1799         .suspend = smu_suspend,
1800         .resume = smu_resume,
1801         .is_idle = NULL,
1802         .check_soft_reset = NULL,
1803         .wait_for_idle = NULL,
1804         .soft_reset = NULL,
1805         .set_clockgating_state = smu_set_clockgating_state,
1806         .set_powergating_state = smu_set_powergating_state,
1807         .enable_umd_pstate = smu_enable_umd_pstate,
1808 };
1809
1810 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1811 {
1812         .type = AMD_IP_BLOCK_TYPE_SMC,
1813         .major = 11,
1814         .minor = 0,
1815         .rev = 0,
1816         .funcs = &smu_ip_funcs,
1817 };
1818
1819 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1820 {
1821         .type = AMD_IP_BLOCK_TYPE_SMC,
1822         .major = 12,
1823         .minor = 0,
1824         .rev = 0,
1825         .funcs = &smu_ip_funcs,
1826 };