drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24
25 #include "pp_debug.h"
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "atom.h"
31 #include "amd_pcie.h"
32
33 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
34 {
35         int ret = 0;
36
37         if (!if_version && !smu_version)
38                 return -EINVAL;
39
40         if (if_version) {
41                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
42                 if (ret)
43                         return ret;
44
45                 ret = smu_read_smc_arg(smu, if_version);
46                 if (ret)
47                         return ret;
48         }
49
50         if (smu_version) {
51                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
52                 if (ret)
53                         return ret;
54
55                 ret = smu_read_smc_arg(smu, smu_version);
56                 if (ret)
57                         return ret;
58         }
59
60         return ret;
61 }
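/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * wants both the driver-interface version and the SMU firmware version can
 * pass both pointers; either argument may be NULL if only one is needed.
 *
 *      uint32_t if_version, smu_version;
 *
 *      if (!smu_get_smc_version(smu, &if_version, &smu_version))
 *              pr_debug("smc if version 0x%08x, fw version 0x%08x\n",
 *                       if_version, smu_version);
 */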
62
63 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
64                             uint32_t min, uint32_t max)
65 {
66         int ret = 0, clk_id = 0;
67         uint32_t param;
68
69         if (min <= 0 && max <= 0)
70                 return -EINVAL;
71
72         if (!smu_clk_dpm_is_enabled(smu, clk_type))
73                 return 0;
74
75         clk_id = smu_clk_get_index(smu, clk_type);
76         if (clk_id < 0)
77                 return clk_id;
78
79         if (max > 0) {
80                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
81                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
82                                                   param);
83                 if (ret)
84                         return ret;
85         }
86
87         if (min > 0) {
88                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
89                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
90                                                   param);
91                 if (ret)
92                         return ret;
93         }
94
95
96         return ret;
97 }
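/*
 * Parameter packing note (derived from the code above): the soft and hard
 * range messages take a single 32-bit argument with the clock index in the
 * upper 16 bits and the frequency limit masked to 16 bits in the lower half,
 * e.g. for an illustrative clk_id of 2 and a limit of 1000:
 *
 *      param = (2 << 16) | (1000 & 0xffff);    == 0x000203e8
 */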
98
99 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
100                             uint32_t min, uint32_t max)
101 {
102         int ret = 0, clk_id = 0;
103         uint32_t param;
104
105         if (min <= 0 && max <= 0)
106                 return -EINVAL;
107
108         if (!smu_clk_dpm_is_enabled(smu, clk_type))
109                 return 0;
110
111         clk_id = smu_clk_get_index(smu, clk_type);
112         if (clk_id < 0)
113                 return clk_id;
114
115         if (max > 0) {
116                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
117                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
118                                                   param);
119                 if (ret)
120                         return ret;
121         }
122
123         if (min > 0) {
124                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
125                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
126                                                   param);
127                 if (ret)
128                         return ret;
129         }
130
131
132         return ret;
133 }
134
135 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
136                            uint32_t *min, uint32_t *max)
137 {
138         int ret = 0, clk_id = 0;
139         uint32_t param = 0;
140         uint32_t clock_limit;
141
142         if (!min && !max)
143                 return -EINVAL;
144
145         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
146                 switch (clk_type) {
147                 case SMU_MCLK:
148                 case SMU_UCLK:
149                         clock_limit = smu->smu_table.boot_values.uclk;
150                         break;
151                 case SMU_GFXCLK:
152                 case SMU_SCLK:
153                         clock_limit = smu->smu_table.boot_values.gfxclk;
154                         break;
155                 case SMU_SOCCLK:
156                         clock_limit = smu->smu_table.boot_values.socclk;
157                         break;
158                 default:
159                         clock_limit = 0;
160                         break;
161                 }
162
163                 /* clock in Mhz unit */
164                 if (min)
165                         *min = clock_limit / 100;
166                 if (max)
167                         *max = clock_limit / 100;
168
169                 return 0;
170         }
171
172         mutex_lock(&smu->mutex);
173         clk_id = smu_clk_get_index(smu, clk_type);
174         if (clk_id < 0) {
175                 ret = -EINVAL;
176                 goto failed;
177         }
178
179         param = (clk_id & 0xffff) << 16;
180
181         if (max) {
182                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
183                 if (ret)
184                         goto failed;
185                 ret = smu_read_smc_arg(smu, max);
186                 if (ret)
187                         goto failed;
188         }
189
190         if (min) {
191                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
192                 if (ret)
193                         goto failed;
194                 ret = smu_read_smc_arg(smu, min);
195                 if (ret)
196                         goto failed;
197         }
198
199 failed:
200         mutex_unlock(&smu->mutex);
201         return ret;
202 }
203
204 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
205                               uint16_t level, uint32_t *value)
206 {
207         int ret = 0, clk_id = 0;
208         uint32_t param;
209
210         if (!value)
211                 return -EINVAL;
212
213         if (!smu_clk_dpm_is_enabled(smu, clk_type))
214                 return 0;
215
216         clk_id = smu_clk_get_index(smu, clk_type);
217         if (clk_id < 0)
218                 return clk_id;
219
220         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
221
222         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
223                                           param);
224         if (ret)
225                 return ret;
226
227         ret = smu_read_smc_arg(smu, &param);
228         if (ret)
229                 return ret;
230
231         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
232          * not supported for now, so mask the flag off */
233         *value = param & 0x7fffffff;
234
235         return ret;
236 }
237
238 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
239                             uint32_t *value)
240 {
241         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
242 }
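/*
 * Illustrative sketch (assuming DPM is enabled for the clock): enumerate the
 * DPM levels of a clock domain with the two helpers above. Level 0xff asks
 * the SMC for the level count rather than a frequency.
 *
 *      uint32_t count, freq, i;
 *
 *      if (!smu_get_dpm_level_count(smu, SMU_SCLK, &count))
 *              for (i = 0; i < count; i++)
 *                      if (!smu_get_dpm_freq_by_index(smu, SMU_SCLK, i, &freq))
 *                              pr_debug("sclk level %u: %u\n", i, freq);
 */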
243
244 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
245 {
246         enum smu_feature_mask feature_id = 0;
247
248         switch (clk_type) {
249         case SMU_MCLK:
250         case SMU_UCLK:
251                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
252                 break;
253         case SMU_GFXCLK:
254         case SMU_SCLK:
255                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
256                 break;
257         case SMU_SOCCLK:
258                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
259                 break;
260         default:
261                 return true;
262         }
263
264         if (!smu_feature_is_enabled(smu, feature_id)) {
265                 pr_warn("smu clk type %d dpm feature %d is not enabled\n", clk_type, feature_id);
266                 return false;
267         }
268
269         return true;
270 }
271
272
273 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
274                            bool gate)
275 {
276         int ret = 0;
277
278         switch (block_type) {
279         case AMD_IP_BLOCK_TYPE_UVD:
280                 ret = smu_dpm_set_uvd_enable(smu, gate);
281                 break;
282         case AMD_IP_BLOCK_TYPE_VCE:
283                 ret = smu_dpm_set_vce_enable(smu, gate);
284                 break;
285         case AMD_IP_BLOCK_TYPE_GFX:
286                 ret = smu_gfx_off_control(smu, gate);
287                 break;
288         default:
289                 break;
290         }
291
292         return ret;
293 }
294
295 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
296 {
297         /* power states are not supported */
298         return POWER_STATE_TYPE_DEFAULT;
299 }
300
301 int smu_get_power_num_states(struct smu_context *smu,
302                              struct pp_states_info *state_info)
303 {
304         if (!state_info)
305                 return -EINVAL;
306
307         /* power states are not supported */
308         memset(state_info, 0, sizeof(struct pp_states_info));
309         state_info->nums = 1;
310         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
311
312         return 0;
313 }
314
315 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
316                            void *data, uint32_t *size)
317 {
318         int ret = 0;
319
320         switch (sensor) {
321         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
322                 *((uint32_t *)data) = smu->pstate_sclk;
323                 *size = 4;
324                 break;
325         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
326                 *((uint32_t *)data) = smu->pstate_mclk;
327                 *size = 4;
328                 break;
329         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
330                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
331                 *size = 8;
332                 break;
333         case AMDGPU_PP_SENSOR_UVD_POWER:
334                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
335                 *size = 4;
336                 break;
337         case AMDGPU_PP_SENSOR_VCE_POWER:
338                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
339                 *size = 4;
340                 break;
341         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
342                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
343                 *size = 4;
344                 break;
345         default:
346                 ret = -EINVAL;
347                 break;
348         }
349
350         if (ret)
351                 *size = 0;
352
353         return ret;
354 }
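/*
 * Usage sketch (illustrative): the sensors handled here return fixed-size
 * values, e.g. reading the stable-pstate sclk into a 4-byte buffer:
 *
 *      uint32_t sclk, size = sizeof(sclk);
 *
 *      if (!smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
 *                                  &sclk, &size))
 *              pr_debug("stable pstate sclk: %u\n", sclk);
 */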
355
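/*
 * Transfer a driver<->SMC table. When drv2smu is true, the table_data buffer
 * is copied into the table's backing BO and pushed to the SMC; otherwise the
 * SMC is asked to dump the table to DRAM and the result is copied back into
 * table_data. The DRAM address is handed to the SMC via the
 * SetDriverDramAddrHigh/Low messages before the transfer message is sent.
 */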
356 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
357                      void *table_data, bool drv2smu)
358 {
359         struct smu_table_context *smu_table = &smu->smu_table;
360         struct smu_table *table = NULL;
361         int ret = 0;
362         int table_id = smu_table_get_index(smu, table_index);
363
364         if (!table_data || table_id >= smu_table->table_count)
365                 return -EINVAL;
366
367         table = &smu_table->tables[table_index];
368
369         if (drv2smu)
370                 memcpy(table->cpu_addr, table_data, table->size);
371
372         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
373                                           upper_32_bits(table->mc_address));
374         if (ret)
375                 return ret;
376         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
377                                           lower_32_bits(table->mc_address));
378         if (ret)
379                 return ret;
380         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
381                                           SMU_MSG_TransferTableDram2Smu :
382                                           SMU_MSG_TransferTableSmu2Dram,
383                                           table_id | ((argument & 0xFFFF) << 16));
384         if (ret)
385                 return ret;
386
387         if (!drv2smu)
388                 memcpy(table_data, table->cpu_addr, table->size);
389
390         return ret;
391 }
392
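/*
 * Whether the new SW SMU path is used for this ASIC: always for Navi10 and
 * newer, and on Vega20 only when the amdgpu.dpm module parameter is set to 2.
 */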
393 bool is_support_sw_smu(struct amdgpu_device *adev)
394 {
395         if (adev->asic_type == CHIP_VEGA20)
396                 return amdgpu_dpm == 2;
397         else if (adev->asic_type >= CHIP_NAVI10)
398                 return true;
399         else
400                 return false;
401 }
402
403 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
404 {
405         struct smu_table_context *smu_table = &smu->smu_table;
406
407         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
408                 return -EINVAL;
409
410         if (smu_table->hardcode_pptable)
411                 *table = smu_table->hardcode_pptable;
412         else
413                 *table = smu_table->power_play_table;
414
415         return smu_table->power_play_table_size;
416 }
417
418 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
419 {
420         struct smu_table_context *smu_table = &smu->smu_table;
421         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
422         int ret = 0;
423
424         if (!smu->pm_enabled)
425                 return -EINVAL;
426         if (header->usStructureSize != size) {
427                 pr_err("pp table size not matched !\n");
428                 return -EIO;
429         }
430
431         mutex_lock(&smu->mutex);
432         if (!smu_table->hardcode_pptable)
433                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
434         if (!smu_table->hardcode_pptable) {
435                 ret = -ENOMEM;
436                 goto failed;
437         }
438
439         memcpy(smu_table->hardcode_pptable, buf, size);
440         smu_table->power_play_table = smu_table->hardcode_pptable;
441         smu_table->power_play_table_size = size;
442         mutex_unlock(&smu->mutex);
443
444         ret = smu_reset(smu);
445         if (ret)
446                 pr_info("smu reset failed, ret = %d\n", ret);
447
448         return ret;
449
450 failed:
451         mutex_unlock(&smu->mutex);
452         return ret;
453 }
454
455 int smu_feature_init_dpm(struct smu_context *smu)
456 {
457         struct smu_feature *feature = &smu->smu_feature;
458         int ret = 0;
459         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
460
461         if (!smu->pm_enabled)
462                 return ret;
463         mutex_lock(&feature->mutex);
464         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
465         mutex_unlock(&feature->mutex);
466
467         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
468                                              SMU_FEATURE_MAX/32);
469         if (ret)
470                 return ret;
471
472         mutex_lock(&feature->mutex);
473         bitmap_or(feature->allowed, feature->allowed,
474                       (unsigned long *)allowed_feature_mask,
475                       feature->feature_num);
476         mutex_unlock(&feature->mutex);
477
478         return ret;
479 }
480
481 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
482 {
483         struct smu_feature *feature = &smu->smu_feature;
484         uint32_t feature_id;
485         int ret = 0;
486
487         feature_id = smu_feature_get_index(smu, mask);
488
489         WARN_ON(feature_id > feature->feature_num);
490
491         mutex_lock(&feature->mutex);
492         ret = test_bit(feature_id, feature->enabled);
493         mutex_unlock(&feature->mutex);
494
495         return ret;
496 }
497
498 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
499                             bool enable)
500 {
501         struct smu_feature *feature = &smu->smu_feature;
502         uint32_t feature_id;
503         int ret = 0;
504
505         feature_id = smu_feature_get_index(smu, mask);
506
507         WARN_ON(feature_id > feature->feature_num);
508
509         mutex_lock(&feature->mutex);
510         ret = smu_feature_update_enable_state(smu, feature_id, enable);
511         if (ret)
512                 goto failed;
513
514         if (enable)
515                 test_and_set_bit(feature_id, feature->enabled);
516         else
517                 test_and_clear_bit(feature_id, feature->enabled);
518
519 failed:
520         mutex_unlock(&feature->mutex);
521
522         return ret;
523 }
524
525 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
526 {
527         struct smu_feature *feature = &smu->smu_feature;
528         uint32_t feature_id;
529         int ret = 0;
530
531         feature_id = smu_feature_get_index(smu, mask);
532
533         WARN_ON(feature_id > feature->feature_num);
534
535         mutex_lock(&feature->mutex);
536         ret = test_bit(feature_id, feature->supported);
537         mutex_unlock(&feature->mutex);
538
539         return ret;
540 }
541
542 int smu_feature_set_supported(struct smu_context *smu,
543                               enum smu_feature_mask mask,
544                               bool enable)
545 {
546         struct smu_feature *feature = &smu->smu_feature;
547         uint32_t feature_id;
548         int ret = 0;
549
550         feature_id = smu_feature_get_index(smu, mask);
551
552         WARN_ON(feature_id > feature->feature_num);
553
554         mutex_lock(&feature->mutex);
555         if (enable)
556                 test_and_set_bit(feature_id, feature->supported);
557         else
558                 test_and_clear_bit(feature_id, feature->supported);
559         mutex_unlock(&feature->mutex);
560
561         return ret;
562 }
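/*
 * Illustrative sketch: the supported/enabled/allowed bitmaps above are keyed
 * by the ASIC-specific index returned by smu_feature_get_index(). A caller
 * gating a feature at runtime might do, for example:
 *
 *      if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT))
 *              ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, false);
 */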
563
564 static int smu_set_funcs(struct amdgpu_device *adev)
565 {
566         struct smu_context *smu = &adev->smu;
567
568         switch (adev->asic_type) {
569         case CHIP_VEGA20:
570         case CHIP_NAVI10:
571                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
572                         smu->od_enabled = true;
573                 smu_v11_0_set_smu_funcs(smu);
574                 break;
575         default:
576                 return -EINVAL;
577         }
578
579         return 0;
580 }
581
582 static int smu_early_init(void *handle)
583 {
584         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
585         struct smu_context *smu = &adev->smu;
586
587         smu->adev = adev;
588         smu->pm_enabled = !!amdgpu_dpm;
589         mutex_init(&smu->mutex);
590
591         return smu_set_funcs(adev);
592 }
593
594 static int smu_late_init(void *handle)
595 {
596         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
597         struct smu_context *smu = &adev->smu;
598
599         if (!smu->pm_enabled)
600                 return 0;
601         mutex_lock(&smu->mutex);
602         smu_handle_task(&adev->smu,
603                         smu->smu_dpm.dpm_level,
604                         AMD_PP_TASK_COMPLETE_INIT);
605         mutex_unlock(&smu->mutex);
606
607         return 0;
608 }
609
610 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
611                             uint16_t *size, uint8_t *frev, uint8_t *crev,
612                             uint8_t **addr)
613 {
614         struct amdgpu_device *adev = smu->adev;
615         uint16_t data_start;
616
617         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
618                                            size, frev, crev, &data_start))
619                 return -EINVAL;
620
621         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
622
623         return 0;
624 }
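/*
 * Usage sketch (illustrative; table_index stands for any valid ATOM data
 * table id): retrieve a pointer into the vbios image together with the
 * table's size and revision.
 *
 *      uint16_t size;
 *      uint8_t frev, crev, *addr;
 *
 *      if (!smu_get_atom_data_table(smu, table_index, &size, &frev, &crev, &addr))
 *              pr_debug("atom table %u: size %u, rev %u.%u\n",
 *                       table_index, size, frev, crev);
 */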
625
626 static int smu_initialize_pptable(struct smu_context *smu)
627 {
628         /* TODO */
629         return 0;
630 }
631
632 static int smu_smc_table_sw_init(struct smu_context *smu)
633 {
634         int ret;
635
636         ret = smu_initialize_pptable(smu);
637         if (ret) {
638                 pr_err("Failed to init smu_initialize_pptable!\n");
639                 return ret;
640         }
641
642         /*
643          * Create the smu_table structure, and initialize the smc tables such as
644          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
645          */
646         ret = smu_init_smc_tables(smu);
647         if (ret) {
648                 pr_err("Failed to init smc tables!\n");
649                 return ret;
650         }
651
652         /*
653          * Create the smu_power_context structure, allocate the smu_dpm_context it
654          * holds and record the context size in the smu_power_context data.
655          */
656         ret = smu_init_power(smu);
657         if (ret) {
658                 pr_err("Failed to init smu_init_power!\n");
659                 return ret;
660         }
661
662         return 0;
663 }
664
665 static int smu_smc_table_sw_fini(struct smu_context *smu)
666 {
667         int ret;
668
669         ret = smu_fini_smc_tables(smu);
670         if (ret) {
671                 pr_err("Failed to smu_fini_smc_tables!\n");
672                 return ret;
673         }
674
675         return 0;
676 }
677
678 static int smu_sw_init(void *handle)
679 {
680         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
681         struct smu_context *smu = &adev->smu;
682         int ret;
683
684         smu->pool_size = adev->pm.smu_prv_buffer_size;
685         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
686         mutex_init(&smu->smu_feature.mutex);
687         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
688         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
689         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
690
691         mutex_init(&smu->smu_baco.mutex);
692         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
693         smu->smu_baco.platform_support = false;
694
695         smu->watermarks_bitmap = 0;
696         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
697         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
698
699         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
700         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
701         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
702         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
703         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
704         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
705         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
706         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
707
708         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
709         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
710         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
711         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
712         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
713         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
714         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
715         smu->display_config = &adev->pm.pm_display_cfg;
716
717         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
718         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
719         ret = smu_init_microcode(smu);
720         if (ret) {
721                 pr_err("Failed to load smu firmware!\n");
722                 return ret;
723         }
724
725         ret = smu_smc_table_sw_init(smu);
726         if (ret) {
727                 pr_err("Failed to sw init smc table!\n");
728                 return ret;
729         }
730
731         ret = smu_register_irq_handler(smu);
732         if (ret) {
733                 pr_err("Failed to register smc irq handler!\n");
734                 return ret;
735         }
736
737         return 0;
738 }
739
740 static int smu_sw_fini(void *handle)
741 {
742         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743         struct smu_context *smu = &adev->smu;
744         int ret;
745
746         kfree(smu->irq_source);
747         smu->irq_source = NULL;
748
749         ret = smu_smc_table_sw_fini(smu);
750         if (ret) {
751                 pr_err("Failed to sw fini smc table!\n");
752                 return ret;
753         }
754
755         ret = smu_fini_power(smu);
756         if (ret) {
757                 pr_err("Failed to init smu_fini_power!\n");
758                 return ret;
759         }
760
761         return 0;
762 }
763
764 static int smu_init_fb_allocations(struct smu_context *smu)
765 {
766         struct amdgpu_device *adev = smu->adev;
767         struct smu_table_context *smu_table = &smu->smu_table;
768         struct smu_table *tables = smu_table->tables;
769         uint32_t table_count = smu_table->table_count;
770         uint32_t i = 0;
771         int32_t ret = 0;
772
773         if (table_count <= 0)
774                 return -EINVAL;
775
776         for (i = 0 ; i < table_count; i++) {
777                 if (tables[i].size == 0)
778                         continue;
779                 ret = amdgpu_bo_create_kernel(adev,
780                                               tables[i].size,
781                                               tables[i].align,
782                                               tables[i].domain,
783                                               &tables[i].bo,
784                                               &tables[i].mc_address,
785                                               &tables[i].cpu_addr);
786                 if (ret)
787                         goto failed;
788         }
789
790         return 0;
791 failed:
792         /* unwind the BOs created so far, including tables[0] */
793         while (i--) {
794                 if (tables[i].size == 0)
795                         continue;
796                 amdgpu_bo_free_kernel(&tables[i].bo,
797                                       &tables[i].mc_address,
798                                       &tables[i].cpu_addr);
799         }
800         return ret;
801 }
802
803 static int smu_fini_fb_allocations(struct smu_context *smu)
804 {
805         struct smu_table_context *smu_table = &smu->smu_table;
806         struct smu_table *tables = smu_table->tables;
807         uint32_t table_count = smu_table->table_count;
808         uint32_t i = 0;
809
810         if (table_count == 0 || tables == NULL)
811                 return 0;
812
813         for (i = 0 ; i < table_count; i++) {
814                 if (tables[i].size == 0)
815                         continue;
816                 amdgpu_bo_free_kernel(&tables[i].bo,
817                                       &tables[i].mc_address,
818                                       &tables[i].cpu_addr);
819         }
820
821         return 0;
822 }
823
824 static int smu_override_pcie_parameters(struct smu_context *smu)
825 {
826         struct amdgpu_device *adev = smu->adev;
827         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
828         int ret;
829
830         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
831                 pcie_gen = 3;
832         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
833                 pcie_gen = 2;
834         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
835                 pcie_gen = 1;
836         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
837                 pcie_gen = 0;
838
839         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
840          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
841          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
842          */
843         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
844                 pcie_width = 6;
845         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
846                 pcie_width = 5;
847         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
848                 pcie_width = 4;
849         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
850                 pcie_width = 3;
851         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
852                 pcie_width = 2;
853         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
854                 pcie_width = 1;
855
856         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
857         ret = smu_send_smc_msg_with_param(smu,
858                                           SMU_MSG_OverridePcieParameters,
859                                           smu_pcie_arg);
860         if (ret)
861                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
862         return ret;
863 }
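/*
 * Example encoding (from the bit layout documented above): a Gen4 x16 link
 * (pcie_gen = 3, pcie_width = 6) at LCLK DPM level 1 packs to
 * (1 << 16) | (3 << 8) | 6 == 0x10306.
 */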
864
865 static int smu_smc_table_hw_init(struct smu_context *smu,
866                                  bool initialize)
867 {
868         struct amdgpu_device *adev = smu->adev;
869         int ret;
870
871         if (smu_is_dpm_running(smu) && adev->in_suspend) {
872                 pr_info("dpm has been enabled\n");
873                 return 0;
874         }
875
876         ret = smu_init_display_count(smu, 0);
877         if (ret)
878                 return ret;
879
880         if (initialize) {
881                 /* get boot_values from vbios to set revision, gfxclk, etc. */
882                 ret = smu_get_vbios_bootup_values(smu);
883                 if (ret)
884                         return ret;
885
886                 ret = smu_setup_pptable(smu);
887                 if (ret)
888                         return ret;
889
890                 ret = smu_get_clk_info_from_vbios(smu);
891                 if (ret)
892                         return ret;
893
894                 /*
895                  * check whether the format_revision in the vbios is compatible with
896                  * the pptable header version, and that the structure size is not 0.
897                  */
898                 ret = smu_check_pptable(smu);
899                 if (ret)
900                         return ret;
901
902                 /*
903                  * allocate vram bos to store smc table contents.
904                  */
905                 ret = smu_init_fb_allocations(smu);
906                 if (ret)
907                         return ret;
908
909                 /*
910                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
911                  * smu_table_context structure. Then read the smc_dpm_table from vbios
912                  * and fill it into smc_pptable as well.
913                  */
914                 ret = smu_parse_pptable(smu);
915                 if (ret)
916                         return ret;
917
918                 /*
919                  * Send the GetDriverIfVersion msg and check that the returned value is
920                  * equal to the DRIVER_IF_VERSION in the smc header.
921                  */
922                 ret = smu_check_fw_version(smu);
923                 if (ret)
924                         return ret;
925         }
926
927         /*
928          * Copy pptable bo in the vram to smc with SMU MSGs such as
929          * SetDriverDramAddr and TransferTableDram2Smu.
930          */
931         ret = smu_write_pptable(smu);
932         if (ret)
933                 return ret;
934
935         /* issue RunAfllBtc msg */
936         ret = smu_run_afll_btc(smu);
937         if (ret)
938                 return ret;
939
940         ret = smu_feature_set_allowed_mask(smu);
941         if (ret)
942                 return ret;
943
944         ret = smu_system_features_control(smu, true);
945         if (ret)
946                 return ret;
947
948         ret = smu_override_pcie_parameters(smu);
949         if (ret)
950                 return ret;
951
952         ret = smu_notify_display_change(smu);
953         if (ret)
954                 return ret;
955
956         /*
957          * Set min deep sleep dce fclk with bootup value from vbios via
958          * SetMinDeepSleepDcefclk MSG.
959          */
960         ret = smu_set_min_dcef_deep_sleep(smu);
961         if (ret)
962                 return ret;
963
964         /*
965          * Set the initial values (read from vbios) in the dpm tables context, such
966          * as gfxclk, memclk and dcefclk, and enable the DPM feature for each type
967          * of clock.
968          */
969         if (initialize) {
970                 ret = smu_populate_smc_pptable(smu);
971                 if (ret)
972                         return ret;
973
974                 ret = smu_init_max_sustainable_clocks(smu);
975                 if (ret)
976                         return ret;
977         }
978
979         ret = smu_set_default_od_settings(smu, initialize);
980         if (ret)
981                 return ret;
982
983         if (initialize) {
984                 ret = smu_populate_umd_state_clk(smu);
985                 if (ret)
986                         return ret;
987
988                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
989                 if (ret)
990                         return ret;
991         }
992
993         /*
994          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
995          */
996         ret = smu_set_tool_table_location(smu);
997
998         if (!smu_is_dpm_running(smu))
999                 pr_info("dpm has been disabled\n");
1000
1001         return ret;
1002 }
1003
1004 /**
1005  * smu_alloc_memory_pool - allocate memory pool in the system memory
1006  *
1007  * @smu: pointer to the SMU context
1008  *
1009  * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr and
1010  * DramLogSetDramAddr messages notify the SMC of its location.
1011  *
1012  * Returns 0 on success, error on failure.
1013  */
1014 static int smu_alloc_memory_pool(struct smu_context *smu)
1015 {
1016         struct amdgpu_device *adev = smu->adev;
1017         struct smu_table_context *smu_table = &smu->smu_table;
1018         struct smu_table *memory_pool = &smu_table->memory_pool;
1019         uint64_t pool_size = smu->pool_size;
1020         int ret = 0;
1021
1022         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1023                 return ret;
1024
1025         memory_pool->size = pool_size;
1026         memory_pool->align = PAGE_SIZE;
1027         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1028
1029         switch (pool_size) {
1030         case SMU_MEMORY_POOL_SIZE_256_MB:
1031         case SMU_MEMORY_POOL_SIZE_512_MB:
1032         case SMU_MEMORY_POOL_SIZE_1_GB:
1033         case SMU_MEMORY_POOL_SIZE_2_GB:
1034                 ret = amdgpu_bo_create_kernel(adev,
1035                                               memory_pool->size,
1036                                               memory_pool->align,
1037                                               memory_pool->domain,
1038                                               &memory_pool->bo,
1039                                               &memory_pool->mc_address,
1040                                               &memory_pool->cpu_addr);
1041                 break;
1042         default:
1043                 break;
1044         }
1045
1046         return ret;
1047 }
1048
1049 static int smu_free_memory_pool(struct smu_context *smu)
1050 {
1051         struct smu_table_context *smu_table = &smu->smu_table;
1052         struct smu_table *memory_pool = &smu_table->memory_pool;
1053         int ret = 0;
1054
1055         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1056                 return ret;
1057
1058         amdgpu_bo_free_kernel(&memory_pool->bo,
1059                               &memory_pool->mc_address,
1060                               &memory_pool->cpu_addr);
1061
1062         memset(memory_pool, 0, sizeof(struct smu_table));
1063
1064         return ret;
1065 }
1066
1067 static int smu_hw_init(void *handle)
1068 {
1069         int ret;
1070         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1071         struct smu_context *smu = &adev->smu;
1072
1073         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1074                 ret = smu_check_fw_status(smu);
1075                 if (ret) {
1076                         pr_err("SMC firmware status is not correct\n");
1077                         return ret;
1078                 }
1079         }
1080
1081         ret = smu_feature_init_dpm(smu);
1082         if (ret)
1083                 goto failed;
1084
1085         ret = smu_smc_table_hw_init(smu, true);
1086         if (ret)
1087                 goto failed;
1088
1089         ret = smu_alloc_memory_pool(smu);
1090         if (ret)
1091                 goto failed;
1092
1093         /*
1094          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to notify
1095          * the SMC of the pool location.
1096          */
1097         ret = smu_notify_memory_pool_location(smu);
1098         if (ret)
1099                 goto failed;
1100
1101         ret = smu_start_thermal_control(smu);
1102         if (ret)
1103                 goto failed;
1104
1105         if (!smu->pm_enabled)
1106                 adev->pm.dpm_enabled = false;
1107         else
1108                 adev->pm.dpm_enabled = true;    /* TODO: will set the dpm_enabled flag once VCN and DAL DPM are workable */
1109
1110         pr_info("SMU is initialized successfully!\n");
1111
1112         return 0;
1113
1114 failed:
1115         return ret;
1116 }
1117
1118 static int smu_hw_fini(void *handle)
1119 {
1120         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1121         struct smu_context *smu = &adev->smu;
1122         struct smu_table_context *table_context = &smu->smu_table;
1123         int ret = 0;
1124
1125         kfree(table_context->driver_pptable);
1126         table_context->driver_pptable = NULL;
1127
1128         kfree(table_context->max_sustainable_clocks);
1129         table_context->max_sustainable_clocks = NULL;
1130
1131         kfree(table_context->overdrive_table);
1132         table_context->overdrive_table = NULL;
1133
1134         ret = smu_fini_fb_allocations(smu);
1135         if (ret)
1136                 return ret;
1137
1138         ret = smu_free_memory_pool(smu);
1139         if (ret)
1140                 return ret;
1141
1142         return 0;
1143 }
1144
1145 int smu_reset(struct smu_context *smu)
1146 {
1147         struct amdgpu_device *adev = smu->adev;
1148         int ret = 0;
1149
1150         ret = smu_hw_fini(adev);
1151         if (ret)
1152                 return ret;
1153
1154         ret = smu_hw_init(adev);
1155         if (ret)
1156                 return ret;
1157
1158         return ret;
1159 }
1160
1161 static int smu_suspend(void *handle)
1162 {
1163         int ret;
1164         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1165         struct smu_context *smu = &adev->smu;
1166         bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1167
1168         ret = smu_system_features_control(smu, false);
1169         if (ret)
1170                 return ret;
1171
1172         if (adev->in_gpu_reset && baco_feature_is_enabled) {
1173                 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1174                 if (ret) {
1175                         pr_warn("set BACO feature enabled failed, return %d\n", ret);
1176                         return ret;
1177                 }
1178         }
1179
1180         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1181
1182         if (adev->asic_type >= CHIP_NAVI10 &&
1183             adev->gfx.rlc.funcs->stop)
1184                 adev->gfx.rlc.funcs->stop(adev);
1185
1186         return 0;
1187 }
1188
1189 static int smu_resume(void *handle)
1190 {
1191         int ret;
1192         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1193         struct smu_context *smu = &adev->smu;
1194
1195         pr_info("SMU is resuming...\n");
1196
1197         mutex_lock(&smu->mutex);
1198
1199         ret = smu_smc_table_hw_init(smu, false);
1200         if (ret)
1201                 goto failed;
1202
1203         ret = smu_start_thermal_control(smu);
1204         if (ret)
1205                 goto failed;
1206
1207         mutex_unlock(&smu->mutex);
1208
1209         pr_info("SMU is resumed successfully!\n");
1210
1211         return 0;
1212 failed:
1213         mutex_unlock(&smu->mutex);
1214         return ret;
1215 }
1216
1217 int smu_display_configuration_change(struct smu_context *smu,
1218                                      const struct amd_pp_display_configuration *display_config)
1219 {
1220         int index = 0;
1221         int num_of_active_display = 0;
1222
1223         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1224                 return -EINVAL;
1225
1226         if (!display_config)
1227                 return -EINVAL;
1228
1229         mutex_lock(&smu->mutex);
1230
1231         smu_set_deep_sleep_dcefclk(smu,
1232                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1233
1234         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1235                 if (display_config->displays[index].controller_id != 0)
1236                         num_of_active_display++;
1237         }
1238
1239         smu_set_active_display_count(smu, num_of_active_display);
1240
1241         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1242                            display_config->cpu_cc6_disable,
1243                            display_config->cpu_pstate_disable,
1244                            display_config->nb_pstate_switch_disable);
1245
1246         mutex_unlock(&smu->mutex);
1247
1248         return 0;
1249 }
1250
1251 static int smu_get_clock_info(struct smu_context *smu,
1252                               struct smu_clock_info *clk_info,
1253                               enum smu_perf_level_designation designation)
1254 {
1255         int ret;
1256         struct smu_performance_level level = {0};
1257
1258         if (!clk_info)
1259                 return -EINVAL;
1260
1261         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1262         if (ret)
1263                 return -EINVAL;
1264
1265         clk_info->min_mem_clk = level.memory_clock;
1266         clk_info->min_eng_clk = level.core_clock;
1267         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1268
1269         ret = smu_get_perf_level(smu, designation, &level);
1270         if (ret)
1271                 return -EINVAL;
1272
1273         clk_info->min_mem_clk = level.memory_clock;
1274         clk_info->min_eng_clk = level.core_clock;
1275         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1276
1277         return 0;
1278 }
1279
1280 int smu_get_current_clocks(struct smu_context *smu,
1281                            struct amd_pp_clock_info *clocks)
1282 {
1283         struct amd_pp_simple_clock_info simple_clocks = {0};
1284         struct smu_clock_info hw_clocks;
1285         int ret = 0;
1286
1287         if (!is_support_sw_smu(smu->adev))
1288                 return -EINVAL;
1289
1290         mutex_lock(&smu->mutex);
1291
1292         smu_get_dal_power_level(smu, &simple_clocks);
1293
1294         if (smu->support_power_containment)
1295                 ret = smu_get_clock_info(smu, &hw_clocks,
1296                                          PERF_LEVEL_POWER_CONTAINMENT);
1297         else
1298                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1299
1300         if (ret) {
1301                 pr_err("Error in smu_get_clock_info\n");
1302                 goto failed;
1303         }
1304
1305         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1306         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1307         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1308         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1309         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1310         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1311         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1312         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1313
1314         if (simple_clocks.level == 0)
1315                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1316         else
1317                 clocks->max_clocks_state = simple_clocks.level;
1318
1319         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1320                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1321                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1322         }
1323
1324 failed:
1325         mutex_unlock(&smu->mutex);
1326         return ret;
1327 }
1328
1329 static int smu_set_clockgating_state(void *handle,
1330                                      enum amd_clockgating_state state)
1331 {
1332         return 0;
1333 }
1334
1335 static int smu_set_powergating_state(void *handle,
1336                                      enum amd_powergating_state state)
1337 {
1338         return 0;
1339 }
1340
1341 static int smu_enable_umd_pstate(void *handle,
1342                       enum amd_dpm_forced_level *level)
1343 {
1344         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1345                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1346                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1347                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1348
1349         struct smu_context *smu = (struct smu_context*)(handle);
1350         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1351         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1352                 return -EINVAL;
1353
1354         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1355                 /* enter umd pstate, save current level, disable gfx cg*/
1356                 if (*level & profile_mode_mask) {
1357                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1358                         smu_dpm_ctx->enable_umd_pstate = true;
1359                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1360                                                                AMD_IP_BLOCK_TYPE_GFX,
1361                                                                AMD_CG_STATE_UNGATE);
1362                         amdgpu_device_ip_set_powergating_state(smu->adev,
1363                                                                AMD_IP_BLOCK_TYPE_GFX,
1364                                                                AMD_PG_STATE_UNGATE);
1365                 }
1366         } else {
1367                 /* exit umd pstate, restore level, enable gfx cg*/
1368                 if (!(*level & profile_mode_mask)) {
1369                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1370                                 *level = smu_dpm_ctx->saved_dpm_level;
1371                         smu_dpm_ctx->enable_umd_pstate = false;
1372                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1373                                                                AMD_IP_BLOCK_TYPE_GFX,
1374                                                                AMD_CG_STATE_GATE);
1375                         amdgpu_device_ip_set_powergating_state(smu->adev,
1376                                                                AMD_IP_BLOCK_TYPE_GFX,
1377                                                                AMD_PG_STATE_GATE);
1378                 }
1379         }
1380
1381         return 0;
1382 }
1383
1384 static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1385 {
1386         int ret = 0;
1387         uint32_t sclk_mask, mclk_mask, soc_mask;
1388
1389         switch (level) {
1390         case AMD_DPM_FORCED_LEVEL_HIGH:
1391                 ret = smu_force_dpm_limit_value(smu, true);
1392                 break;
1393         case AMD_DPM_FORCED_LEVEL_LOW:
1394                 ret = smu_force_dpm_limit_value(smu, false);
1395                 break;
1396         case AMD_DPM_FORCED_LEVEL_AUTO:
1397         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1398                 ret = smu_unforce_dpm_levels(smu);
1399                 break;
1400         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1401         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1402         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1403                 ret = smu_get_profiling_clk_mask(smu, level,
1404                                                  &sclk_mask,
1405                                                  &mclk_mask,
1406                                                  &soc_mask);
1407                 if (ret)
1408                         return ret;
1409                 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
1410                 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
1411                 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1412                 break;
1413         case AMD_DPM_FORCED_LEVEL_MANUAL:
1414         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1415         default:
1416                 break;
1417         }
1418         return ret;
1419 }
1420
1421 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1422                                    enum amd_dpm_forced_level level,
1423                                    bool skip_display_settings)
1424 {
1425         int ret = 0;
1426         int index = 0;
1427         long workload;
1428         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1429
1430         if (!smu->pm_enabled)
1431                 return -EINVAL;
1432         if (!skip_display_settings) {
1433                 ret = smu_display_config_changed(smu);
1434                 if (ret) {
1435                         pr_err("Failed to change display config!");
1436                         return ret;
1437                 }
1438         }
1439
1440         if (!smu->pm_enabled)
1441                 return -EINVAL;
1442         ret = smu_apply_clocks_adjust_rules(smu);
1443         if (ret) {
1444                 pr_err("Failed to apply clocks adjust rules!");
1445                 return ret;
1446         }
1447
1448         if (!skip_display_settings) {
1449                 ret = smu_notify_smc_dispaly_config(smu);
1450                 if (ret) {
1451                         pr_err("Failed to notify smc display config!");
1452                         return ret;
1453                 }
1454         }
1455
1456         if (smu_dpm_ctx->dpm_level != level) {
1457                 ret = smu_asic_set_performance_level(smu, level);
1458                 if (ret) {
1459                         ret = smu_default_set_performance_level(smu, level);
1460                 }
1461                 if (!ret)
1462                         smu_dpm_ctx->dpm_level = level;
1463         }
1464
1465         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1466                 index = fls(smu->workload_mask);
1467                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1468                 workload = smu->workload_setting[index];
1469
1470                 if (smu->power_profile_mode != workload)
1471                         smu_set_power_profile_mode(smu, &workload, 0);
1472         }
1473
1474         return ret;
1475 }
1476
1477 int smu_handle_task(struct smu_context *smu,
1478                     enum amd_dpm_forced_level level,
1479                     enum amd_pp_task task_id)
1480 {
1481         int ret = 0;
1482
1483         switch (task_id) {
1484         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1485                 ret = smu_pre_display_config_changed(smu);
1486                 if (ret)
1487                         return ret;
1488                 ret = smu_set_cpu_power_state(smu);
1489                 if (ret)
1490                         return ret;
1491                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1492                 break;
1493         case AMD_PP_TASK_COMPLETE_INIT:
1494         case AMD_PP_TASK_READJUST_POWER_STATE:
1495                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1496                 break;
1497         default:
1498                 break;
1499         }
1500
1501         return ret;
1502 }
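/*
 * Illustrative call sequence (mirroring smu_late_init() above): re-evaluate
 * the power state at the current DPM level once initialization completes.
 *
 *      mutex_lock(&smu->mutex);
 *      smu_handle_task(smu, smu->smu_dpm.dpm_level, AMD_PP_TASK_COMPLETE_INIT);
 *      mutex_unlock(&smu->mutex);
 */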
1503
1504 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1505 {
1506         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1507         enum amd_dpm_forced_level level;
1508
1509         if (!smu_dpm_ctx->dpm_context)
1510                 return -EINVAL;
1511
1512         mutex_lock(&(smu->mutex));
1513         level = smu_dpm_ctx->dpm_level;
1514         mutex_unlock(&(smu->mutex));
1515
1516         return level;
1517 }
1518
1519 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1520 {
1521         int ret = 0;
1522         int i;
1523         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1524
1525         if (!smu_dpm_ctx->dpm_context)
1526                 return -EINVAL;
1527
1528         for (i = 0; i < smu->adev->num_ip_blocks; i++) {
1529                 if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
1530                         break;
1531         }
1532
1533
1534         smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
1535         ret = smu_handle_task(smu, level,
1536                               AMD_PP_TASK_READJUST_POWER_STATE);
1537         if (ret)
1538                 return ret;
1539
1540         mutex_lock(&smu->mutex);
1541         smu_dpm_ctx->dpm_level = level;
1542         mutex_unlock(&smu->mutex);
1543
1544         return ret;
1545 }
1546
1547 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1548 {
1549         int ret = 0;
1550
1551         mutex_lock(&smu->mutex);
1552         ret = smu_init_display_count(smu, count);
1553         mutex_unlock(&smu->mutex);
1554
1555         return ret;
1556 }
1557
1558 const struct amd_ip_funcs smu_ip_funcs = {
1559         .name = "smu",
1560         .early_init = smu_early_init,
1561         .late_init = smu_late_init,
1562         .sw_init = smu_sw_init,
1563         .sw_fini = smu_sw_fini,
1564         .hw_init = smu_hw_init,
1565         .hw_fini = smu_hw_fini,
1566         .suspend = smu_suspend,
1567         .resume = smu_resume,
1568         .is_idle = NULL,
1569         .check_soft_reset = NULL,
1570         .wait_for_idle = NULL,
1571         .soft_reset = NULL,
1572         .set_clockgating_state = smu_set_clockgating_state,
1573         .set_powergating_state = smu_set_powergating_state,
1574         .enable_umd_pstate = smu_enable_umd_pstate,
1575 };
1576
1577 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1578 {
1579         .type = AMD_IP_BLOCK_TYPE_SMC,
1580         .major = 11,
1581         .minor = 0,
1582         .rev = 0,
1583         .funcs = &smu_ip_funcs,
1584 };