drivers/gpu/drm/amd/powerplay/smu_v11_0.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "atomfirmware.h"
28 #include "amdgpu_atomfirmware.h"
29 #include "smu_v11_0.h"
30 #include "smu11_driver_if.h"
31 #include "soc15_common.h"
32 #include "atom.h"
33 #include "vega20_ppt.h"
34 #include "pp_thermal.h"
35
36 #include "asic_reg/thm/thm_11_0_2_offset.h"
37 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
38 #include "asic_reg/mp/mp_9_0_offset.h"
39 #include "asic_reg/mp/mp_9_0_sh_mask.h"
40 #include "asic_reg/nbio/nbio_7_4_offset.h"
41 #include "asic_reg/smuio/smuio_9_0_offset.h"
42 #include "asic_reg/smuio/smuio_9_0_sh_mask.h"
43
44 MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
45
46 #define SMU11_TOOL_SIZE         0x19000
47 #define SMU11_THERMAL_MINIMUM_ALERT_TEMP      0
48 #define SMU11_THERMAL_MAXIMUM_ALERT_TEMP      255
49
50 #define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
51 #define SMU11_VOLTAGE_SCALE 4
52
53 #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
54                          FEATURE_DPM_GFXCLK_MASK | \
55                          FEATURE_DPM_UCLK_MASK | \
56                          FEATURE_DPM_SOCCLK_MASK | \
57                          FEATURE_DPM_UVD_MASK | \
58                          FEATURE_DPM_VCE_MASK | \
59                          FEATURE_DPM_MP0CLK_MASK | \
60                          FEATURE_DPM_LINK_MASK | \
61                          FEATURE_DPM_DCEFCLK_MASK)
62
63 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
64                                               uint16_t msg)
65 {
66         struct amdgpu_device *adev = smu->adev;
67         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
68         return 0;
69 }
70
71 static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
72 {
73         struct amdgpu_device *adev = smu->adev;
74
75         *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
76         return 0;
77 }
78
79 static int smu_v11_0_wait_for_response(struct smu_context *smu)
80 {
81         struct amdgpu_device *adev = smu->adev;
82         uint32_t cur_value, i;
83
84         for (i = 0; i < adev->usec_timeout; i++) {
85                 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
86                 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
87                         break;
88                 udelay(1);
89         }
90
91         /* timeout means wrong logic */
92         if (i == adev->usec_timeout)
93                 return -ETIME;
94
95         return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
96 }
97
98 static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
99 {
100         struct amdgpu_device *adev = smu->adev;
101         int ret = 0, index = 0;
102
103         index = smu_msg_get_index(smu, msg);
104         if (index < 0)
105                 return index;
106
107         smu_v11_0_wait_for_response(smu);
108
109         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
110
111         smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
112
113         ret = smu_v11_0_wait_for_response(smu);
114
115         if (ret)
116                 pr_err("Failed to send message 0x%x, response 0x%x\n", index,
117                        ret);
118
119         return ret;
120
121 }
122
123 static int
124 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
125                               uint32_t param)
126 {
127
128         struct amdgpu_device *adev = smu->adev;
129         int ret = 0, index = 0;
130
131         index = smu_msg_get_index(smu, msg);
132         if (index < 0)
133                 return index;
134
135         ret = smu_v11_0_wait_for_response(smu);
136         if (ret)
137                 pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
138                        index, ret, param);
139
140         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
141
142         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
143
144         smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
145
146         ret = smu_v11_0_wait_for_response(smu);
147         if (ret)
148                 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
149                        index, ret, param);
150
151         return ret;
152 }
153
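/*
 * The three helpers above implement the driver-to-SMU mailbox used by every
 * request in this file: the message index goes into MP1_SMN_C2PMSG_66, an
 * optional 32-bit argument into C2PMSG_82, and the firmware acknowledges by
 * writing a non-zero status (0x1 on success) into C2PMSG_90, which is polled
 * for up to adev->usec_timeout microseconds.  A rough sketch of one round
 * trip, using the smu_send_smc_msg_with_param()/smu_read_smc_arg() wrappers
 * the rest of this file is built on (error handling trimmed for brevity):
 *
 *	uint32_t freq;
 *
 *	smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
 *				    PPCLK_GFXCLK << 16);
 *	smu_read_smc_arg(smu, &freq);	// reply is left in C2PMSG_82
 */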
154 static int smu_v11_0_init_microcode(struct smu_context *smu)
155 {
156         struct amdgpu_device *adev = smu->adev;
157         const char *chip_name;
158         char fw_name[30];
159         int err = 0;
160         const struct smc_firmware_header_v1_0 *hdr;
161         const struct common_firmware_header *header;
162         struct amdgpu_firmware_info *ucode = NULL;
163
164         switch (adev->asic_type) {
165         case CHIP_VEGA20:
166                 chip_name = "vega20";
167                 break;
168         default:
169                 BUG();
170         }
171
172         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
173
174         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
175         if (err)
176                 goto out;
177         err = amdgpu_ucode_validate(adev->pm.fw);
178         if (err)
179                 goto out;
180
181         hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
182         amdgpu_ucode_print_smc_hdr(&hdr->header);
183         adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
184
185         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
186                 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
187                 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
188                 ucode->fw = adev->pm.fw;
189                 header = (const struct common_firmware_header *)ucode->fw->data;
190                 adev->firmware.fw_size +=
191                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
192         }
193
194 out:
195         if (err) {
196                 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
197                           fw_name);
198                 release_firmware(adev->pm.fw);
199                 adev->pm.fw = NULL;
200         }
201         return err;
202 }
203
204 static int smu_v11_0_load_microcode(struct smu_context *smu)
205 {
206         return 0;
207 }
208
209 static int smu_v11_0_check_fw_status(struct smu_context *smu)
210 {
211         struct amdgpu_device *adev = smu->adev;
212         uint32_t mp1_fw_flags;
213
214         mp1_fw_flags = RREG32_PCIE(MP1_Public |
215                                    (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
216
217         if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
218             MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
219                 return 0;
220
221         return -EIO;
222 }
223
224 static int smu_v11_0_check_fw_version(struct smu_context *smu)
225 {
226         uint32_t if_version = 0xff, smu_version = 0xff;
227         uint16_t smu_major;
228         uint8_t smu_minor, smu_debug;
229         int ret = 0;
230
231         ret = smu_get_smc_version(smu, &if_version, &smu_version);
232         if (ret)
233                 return ret;
234
235         smu_major = (smu_version >> 16) & 0xffff;
236         smu_minor = (smu_version >> 8) & 0xff;
237         smu_debug = (smu_version >> 0) & 0xff;
238
239         pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n",
240                 if_version, smu_version, smu_major, smu_minor, smu_debug);
241
242         if (if_version != smu->smc_if_version) {
243                 pr_err("SMU driver if version mismatch\n");
244                 ret = -EINVAL;
245         }
246
247         return ret;
248 }
249
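/*
 * The 32-bit smu_version word is packed as major[31:16], minor[15:8],
 * debug[7:0]; a reported value of 0x002A3200, for example, decodes to
 * 42.50.0:
 *
 *	smu_major = (0x002A3200 >> 16) & 0xffff;	// 0x002A = 42
 *	smu_minor = (0x002A3200 >>  8) & 0xff;		// 0x32   = 50
 *	smu_debug = (0x002A3200 >>  0) & 0xff;		// 0x00   = 0
 *
 * Only the separately reported driver interface version (if_version) is
 * checked against smu->smc_if_version; the firmware version itself is just
 * logged.
 */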
250 static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
251 {
252         int ret, index;
253         uint16_t size;
254         uint8_t frev, crev;
255         void *table;
256
257         index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
258                                             powerplayinfo);
259
260         ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
261                                       (uint8_t **)&table);
262         if (ret)
263                 return ret;
264
265         if (!smu->smu_table.power_play_table)
266                 smu->smu_table.power_play_table = table;
267         if (!smu->smu_table.power_play_table_size)
268                 smu->smu_table.power_play_table_size = size;
269
270         return 0;
271 }
272
273 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
274 {
275         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
276
277         if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
278                 return -EINVAL;
279
280         return smu_alloc_dpm_context(smu);
281 }
282
283 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
284 {
285         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
286
287         if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
288                 return -EINVAL;
289
290         kfree(smu_dpm->dpm_context);
291         kfree(smu_dpm->golden_dpm_context);
292         kfree(smu_dpm->dpm_current_power_state);
293         kfree(smu_dpm->dpm_request_power_state);
294         smu_dpm->dpm_context = NULL;
295         smu_dpm->golden_dpm_context = NULL;
296         smu_dpm->dpm_context_size = 0;
297         smu_dpm->dpm_current_power_state = NULL;
298         smu_dpm->dpm_request_power_state = NULL;
299
300         return 0;
301 }
302
303 static int smu_v11_0_init_smc_tables(struct smu_context *smu)
304 {
305         struct smu_table_context *smu_table = &smu->smu_table;
306         struct smu_table *tables = NULL;
307         int ret = 0;
308
309         if (smu_table->tables || smu_table->table_count != 0)
310                 return -EINVAL;
311
312         tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
313         if (!tables)
314                 return -ENOMEM;
315
316         smu_table->tables = tables;
317         smu_table->table_count = TABLE_COUNT;
318
319         SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
320                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
321         SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
322                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
323         SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
324                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
325         SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
326                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
327         SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
328                        AMDGPU_GEM_DOMAIN_VRAM);
329         SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
330                        sizeof(DpmActivityMonitorCoeffInt_t),
331                        PAGE_SIZE,
332                        AMDGPU_GEM_DOMAIN_VRAM);
333
334         ret = smu_v11_0_init_dpm_context(smu);
335         if (ret)
336                 return ret;
337
338         return 0;
339 }
340
341 static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
342 {
343         struct smu_table_context *smu_table = &smu->smu_table;
344         int ret = 0;
345
346         if (!smu_table->tables || smu_table->table_count == 0)
347                 return -EINVAL;
348
349         kfree(smu_table->tables);
350         smu_table->tables = NULL;
351         smu_table->table_count = 0;
352
353         ret = smu_v11_0_fini_dpm_context(smu);
354         if (ret)
355                 return ret;
356         return 0;
357 }
358
359 static int smu_v11_0_init_power(struct smu_context *smu)
360 {
361         struct smu_power_context *smu_power = &smu->smu_power;
362
363         if (!smu->pm_enabled)
364                 return 0;
365         if (smu_power->power_context || smu_power->power_context_size != 0)
366                 return -EINVAL;
367
368         smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
369                                            GFP_KERNEL);
370         if (!smu_power->power_context)
371                 return -ENOMEM;
372         smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
373
374         smu->metrics_time = 0;
375         smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
376         if (!smu->metrics_table) {
377                 kfree(smu_power->power_context);
378                 return -ENOMEM;
379         }
380
381         return 0;
382 }
383
384 static int smu_v11_0_fini_power(struct smu_context *smu)
385 {
386         struct smu_power_context *smu_power = &smu->smu_power;
387
388         if (!smu->pm_enabled)
389                 return 0;
390         if (!smu_power->power_context || smu_power->power_context_size == 0)
391                 return -EINVAL;
392
393         kfree(smu->metrics_table);
394         kfree(smu_power->power_context);
395         smu->metrics_table = NULL;
396         smu_power->power_context = NULL;
397         smu_power->power_context_size = 0;
398
399         return 0;
400 }
401
402 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
403 {
404         int ret, index;
405         uint16_t size;
406         uint8_t frev, crev;
407         struct atom_common_table_header *header;
408         struct atom_firmware_info_v3_3 *v_3_3;
409         struct atom_firmware_info_v3_1 *v_3_1;
410
411         index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
412                                             firmwareinfo);
413
414         ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
415                                       (uint8_t **)&header);
416         if (ret)
417                 return ret;
418
419         if (header->format_revision != 3) {
420                 pr_err("unknown atom_firmware_info version! for smu11\n");
421                 return -EINVAL;
422         }
423
424         switch (header->content_revision) {
425         case 0:
426         case 1:
427         case 2:
428                 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
429                 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
430                 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
431                 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
432                 smu->smu_table.boot_values.socclk = 0;
433                 smu->smu_table.boot_values.dcefclk = 0;
434                 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
435                 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
436                 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
437                 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
438                 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
439                 smu->smu_table.boot_values.pp_table_id = 0;
440                 break;
441         case 3:
442         default:
443                 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
444                 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
445                 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
446                 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
447                 smu->smu_table.boot_values.socclk = 0;
448                 smu->smu_table.boot_values.dcefclk = 0;
449                 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
450                 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
451                 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
452                 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
453                 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
454                 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
455         }
456
457         return 0;
458 }
459
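/*
 * The firmwareinfo table reports the bootup gfxclk/uclk in 10 kHz units
 * (e.g. bootup_sclk_in10khz = 135000 corresponds to 1350 MHz) and the bootup
 * voltages in millivolts.  socclk and dcefclk are intentionally left at zero
 * here; they are filled in from the getsmuclockinfo command table by
 * smu_v11_0_get_clk_info_from_vbios() below.
 */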
460 static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
461 {
462         int ret, index;
463         struct amdgpu_device *adev = smu->adev;
464         struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
465         struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
466
467         input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
468         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
469         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
470                                             getsmuclockinfo);
471
472         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
473                                         (uint32_t *)&input);
474         if (ret)
475                 return -EINVAL;
476
477         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
478         smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
479
480         memset(&input, 0, sizeof(input));
481         input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
482         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
483         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
484                                             getsmuclockinfo);
485
486         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
487                                         (uint32_t *)&input);
488         if (ret)
489                 return -EINVAL;
490
491         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
492         smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
493
494         memset(&input, 0, sizeof(input));
495         input.clk_id = SMU11_SYSPLL0_ECLK_ID;
496         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
497         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
498                                             getsmuclockinfo);
499
500         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
501                                         (uint32_t *)&input);
502         if (ret)
503                 return -EINVAL;
504
505         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
506         smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
507
508         memset(&input, 0, sizeof(input));
509         input.clk_id = SMU11_SYSPLL0_VCLK_ID;
510         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
511         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
512                                             getsmuclockinfo);
513
514         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
515                                         (uint32_t *)&input);
516         if (ret)
517                 return -EINVAL;
518
519         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
520         smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
521
522         memset(&input, 0, sizeof(input));
523         input.clk_id = SMU11_SYSPLL0_DCLK_ID;
524         input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
525         index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
526                                             getsmuclockinfo);
527
528         ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
529                                         (uint32_t *)&input);
530         if (ret)
531                 return -EINVAL;
532
533         output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
534         smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
535
536         return 0;
537 }
538
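/*
 * Each of the five queries above executes the same getsmuclockinfo ATOM
 * command table: the output structure overlays the input buffer, and the
 * frequency returned in Hz is divided by 10000 so it ends up in the same
 * 10 kHz units as the firmwareinfo bootup values.  The repetition could be
 * folded into a small helper along these lines (get_syspll0_clk_freq() is a
 * hypothetical name, not an existing function):
 *
 *	static int get_syspll0_clk_freq(struct smu_context *smu,
 *					uint8_t clk_id, uint32_t *freq_10khz)
 *	{
 *		struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
 *		struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
 *		int index, ret;
 *
 *		input.clk_id = clk_id;
 *		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 *		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 *						    getsmuclockinfo);
 *		ret = amdgpu_atom_execute_table(smu->adev->mode_info.atom_context,
 *						index, (uint32_t *)&input);
 *		if (ret)
 *			return -EINVAL;
 *
 *		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 *		*freq_10khz = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 *		return 0;
 *	}
 */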
539 static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
540 {
541         struct smu_table_context *smu_table = &smu->smu_table;
542         struct smu_table *memory_pool = &smu_table->memory_pool;
543         int ret = 0;
544         uint64_t address;
545         uint32_t address_low, address_high;
546
547         if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
548                 return ret;
549
550         address = (uintptr_t)memory_pool->cpu_addr;
551         address_high = (uint32_t)upper_32_bits(address);
552         address_low  = (uint32_t)lower_32_bits(address);
553
554         ret = smu_send_smc_msg_with_param(smu,
555                                           SMU_MSG_SetSystemVirtualDramAddrHigh,
556                                           address_high);
557         if (ret)
558                 return ret;
559         ret = smu_send_smc_msg_with_param(smu,
560                                           SMU_MSG_SetSystemVirtualDramAddrLow,
561                                           address_low);
562         if (ret)
563                 return ret;
564
565         address = memory_pool->mc_address;
566         address_high = (uint32_t)upper_32_bits(address);
567         address_low  = (uint32_t)lower_32_bits(address);
568
569         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
570                                           address_high);
571         if (ret)
572                 return ret;
573         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
574                                           address_low);
575         if (ret)
576                 return ret;
577         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
578                                           (uint32_t)memory_pool->size);
579         if (ret)
580                 return ret;
581
582         return ret;
583 }
584
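/*
 * The SMU is told about the pool twice: once with the CPU virtual address
 * (SetSystemVirtualDramAddr*) and once with the GPU/MC bus address
 * (DramLogSetDramAddr*), each 64-bit address split into two 32-bit message
 * parameters:
 *
 *	address_high = upper_32_bits(address);	// bits 63:32
 *	address_low  = lower_32_bits(address);	// bits 31:0
 *
 * When the pool was never allocated (size == 0 or no CPU mapping) nothing is
 * sent at all, which is why the early return above reports success.
 */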
585 static int smu_v11_0_check_pptable(struct smu_context *smu)
586 {
587         int ret;
588
589         ret = smu_check_powerplay_table(smu);
590         return ret;
591 }
592
593 static int smu_v11_0_parse_pptable(struct smu_context *smu)
594 {
595         int ret;
596
597         struct smu_table_context *table_context = &smu->smu_table;
598
599         if (table_context->driver_pptable)
600                 return -EINVAL;
601
602         table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
603
604         if (!table_context->driver_pptable)
605                 return -ENOMEM;
606
607         ret = smu_store_powerplay_table(smu);
608         if (ret)
609                 return -EINVAL;
610
611         ret = smu_append_powerplay_table(smu);
612
613         return ret;
614 }
615
616 static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
617 {
618         int ret;
619
620         ret = smu_set_default_dpm_table(smu);
621
622         return ret;
623 }
624
625 static int smu_v11_0_write_pptable(struct smu_context *smu)
626 {
627         struct smu_table_context *table_context = &smu->smu_table;
628         int ret = 0;
629
630         ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
631
632         return ret;
633 }
634
635 static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
636 {
637         return smu_update_table(smu, TABLE_WATERMARKS,
638                                 smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
639 }
640
641 static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
642 {
643         int ret;
644
645         ret = smu_send_smc_msg_with_param(smu,
646                                           SMU_MSG_SetMinDeepSleepDcefclk, clk);
647         if (ret)
648                 pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
649
650         return ret;
651 }
652
653 static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
654 {
655         struct smu_table_context *table_context = &smu->smu_table;
656
657         if (!smu->pm_enabled)
658                 return 0;
659         if (!table_context)
660                 return -EINVAL;
661
662         return smu_set_deep_sleep_dcefclk(smu,
663                                           table_context->boot_values.dcefclk / 100);
664 }
665
666 static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
667 {
668         int ret = 0;
669         struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
670
671         if (tool_table->mc_address) {
672                 ret = smu_send_smc_msg_with_param(smu,
673                                 SMU_MSG_SetToolsDramAddrHigh,
674                                 upper_32_bits(tool_table->mc_address));
675                 if (!ret)
676                         ret = smu_send_smc_msg_with_param(smu,
677                                 SMU_MSG_SetToolsDramAddrLow,
678                                 lower_32_bits(tool_table->mc_address));
679         }
680
681         return ret;
682 }
683
684 static int smu_v11_0_init_display(struct smu_context *smu)
685 {
686         int ret = 0;
687
688         if (!smu->pm_enabled)
689                 return ret;
690         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
691         return ret;
692 }
693
694 static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
695 {
696         uint32_t feature_low = 0, feature_high = 0;
697         int ret = 0;
698
699         if (!smu->pm_enabled)
700                 return ret;
701         if (feature_id < 32)
702                 feature_low = (1 << feature_id);
703         else if (feature_id < 64)
704                 feature_high = (1 << (feature_id - 32));
705         else
706                 return -EINVAL;
707
708         if (enabled) {
709                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
710                                                   feature_low);
711                 if (ret)
712                         return ret;
713                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
714                                                   feature_high);
715                 if (ret)
716                         return ret;
717
718         } else {
719                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
720                                                   feature_low);
721                 if (ret)
722                         return ret;
723                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
724                                                   feature_high);
725                 if (ret)
726                         return ret;
727
728         }
729
730         return ret;
731 }
732
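/*
 * SMU features form a 64-bit mask, but the message interface carries only
 * 32 bits per message, so a feature_id is mapped onto either the "Low"
 * (bits 0-31) or the "High" (bits 32-63) word.  Enabling feature_id 35, for
 * example, sends EnableSmuFeaturesHigh with bit 3 set:
 *
 *	feature_high = 1 << (35 - 32);	// 0x00000008
 *
 * while feature_id 7 would send EnableSmuFeaturesLow with 1 << 7.
 */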
733 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
734 {
735         struct smu_feature *feature = &smu->smu_feature;
736         int ret = 0;
737         uint32_t feature_mask[2];
738
739         mutex_lock(&feature->mutex);
740         if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
741                 goto failed;
742
743         bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
744
745         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
746                                           feature_mask[1]);
747         if (ret)
748                 goto failed;
749
750         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
751                                           feature_mask[0]);
752         if (ret)
753                 goto failed;
754
755 failed:
756         mutex_unlock(&feature->mutex);
757         return ret;
758 }
759
760 static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
761                                       uint32_t *feature_mask, uint32_t num)
762 {
763         uint32_t feature_mask_high = 0, feature_mask_low = 0;
764         int ret = 0;
765
766         if (!feature_mask || num < 2)
767                 return -EINVAL;
768
769         ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
770         if (ret)
771                 return ret;
772         ret = smu_read_smc_arg(smu, &feature_mask_high);
773         if (ret)
774                 return ret;
775
776         ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
777         if (ret)
778                 return ret;
779         ret = smu_read_smc_arg(smu, &feature_mask_low);
780         if (ret)
781                 return ret;
782
783         feature_mask[0] = feature_mask_low;
784         feature_mask[1] = feature_mask_high;
785
786         return ret;
787 }
788
789 static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
790 {
791         int ret = 0;
792         uint32_t feature_mask[2];
793         unsigned long feature_enabled;
794         ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
795         feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
796                            ((uint64_t)feature_mask[1] << 32));
797         return !!(feature_enabled & SMC_DPM_FEATURE);
798 }
799
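/*
 * GetEnabledSmuFeatures{High,Low} each return one 32-bit word through the
 * argument register; the two reads are stitched back into a 64-bit mask so
 * it can be tested against SMC_DPM_FEATURE (the OR of the per-domain DPM
 * feature bits defined at the top of this file).  Note that
 * smu_v11_0_is_dpm_running() ignores the return value of the mask query, so
 * on a failed read the result is based on uninitialized data and should be
 * treated as a best-effort hint only.
 */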
800 static int smu_v11_0_system_features_control(struct smu_context *smu,
801                                              bool en)
802 {
803         struct smu_feature *feature = &smu->smu_feature;
804         uint32_t feature_mask[2];
805         int ret = 0;
806
807         if (smu->pm_enabled) {
808                 ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
809                                              SMU_MSG_DisableAllSmuFeatures));
810                 if (ret)
811                         return ret;
812         }
813
814         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
815         if (ret)
816                 return ret;
817
818         bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
819                     feature->feature_num);
820         bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
821                     feature->feature_num);
822
823         return ret;
824 }
825
826 static int smu_v11_0_notify_display_change(struct smu_context *smu)
827 {
828         int ret = 0;
829
830         if (!smu->pm_enabled)
831                 return ret;
832         if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
833                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
834
835         return ret;
836 }
837
838 static int
839 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
840                                     PPCLK_e clock_select)
841 {
842         int ret = 0;
843
844         if (!smu->pm_enabled)
845                 return ret;
846         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
847                                           clock_select << 16);
848         if (ret) {
849                 pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
850                 return ret;
851         }
852
853         ret = smu_read_smc_arg(smu, clock);
854         if (ret)
855                 return ret;
856
857         if (*clock != 0)
858                 return 0;
859
860         /* if DC limit is zero, return AC limit */
861         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
862                                           clock_select << 16);
863         if (ret) {
864                 pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
865                 return ret;
866         }
867
868         ret = smu_read_smc_arg(smu, clock);
869
870         return ret;
871 }
872
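/*
 * Both GetDcModeMaxDpmFreq and GetMaxDpmFreq take the clock selector in the
 * upper 16 bits of the parameter (clock_select << 16) and return the limit
 * in MHz through the argument register.  A DC-mode limit of zero means the
 * firmware imposes no DC-specific cap on that clock, so the code falls back
 * to the plain AC limit instead.
 */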
873 static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
874 {
875         struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
876         int ret = 0;
877
878         max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
879                                          GFP_KERNEL);
880         smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
881
882         max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
883         max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
884         max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
885         max_sustainable_clocks->display_clock = 0xFFFFFFFF;
886         max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
887         max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
888
889         if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
890                 ret = smu_v11_0_get_max_sustainable_clock(smu,
891                                                           &(max_sustainable_clocks->uclock),
892                                                           PPCLK_UCLK);
893                 if (ret) {
894                         pr_err("[%s] failed to get max UCLK from SMC!",
895                                __func__);
896                         return ret;
897                 }
898         }
899
900         if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
901                 ret = smu_v11_0_get_max_sustainable_clock(smu,
902                                                           &(max_sustainable_clocks->soc_clock),
903                                                           PPCLK_SOCCLK);
904                 if (ret) {
905                         pr_err("[%s] failed to get max SOCCLK from SMC!",
906                                __func__);
907                         return ret;
908                 }
909         }
910
911         if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
912                 ret = smu_v11_0_get_max_sustainable_clock(smu,
913                                                           &(max_sustainable_clocks->dcef_clock),
914                                                           PPCLK_DCEFCLK);
915                 if (ret) {
916                         pr_err("[%s] failed to get max DCEFCLK from SMC!",
917                                __func__);
918                         return ret;
919                 }
920
921                 ret = smu_v11_0_get_max_sustainable_clock(smu,
922                                                           &(max_sustainable_clocks->display_clock),
923                                                           PPCLK_DISPCLK);
924                 if (ret) {
925                         pr_err("[%s] failed to get max DISPCLK from SMC!",
926                                __func__);
927                         return ret;
928                 }
929                 ret = smu_v11_0_get_max_sustainable_clock(smu,
930                                                           &(max_sustainable_clocks->phy_clock),
931                                                           PPCLK_PHYCLK);
932                 if (ret) {
933                         pr_err("[%s] failed to get max PHYCLK from SMC!",
934                                __func__);
935                         return ret;
936                 }
937                 ret = smu_v11_0_get_max_sustainable_clock(smu,
938                                                           &(max_sustainable_clocks->pixel_clock),
939                                                           PPCLK_PIXCLK);
940                 if (ret) {
941                         pr_err("[%s] failed to get max PIXCLK from SMC!",
942                                __func__);
943                         return ret;
944                 }
945         }
946
947         if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
948                 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
949
950         return 0;
951 }
952
953 static int smu_v11_0_get_power_limit(struct smu_context *smu,
954                                      uint32_t *limit,
955                                      bool get_default)
956 {
957         int ret = 0;
958
959         if (get_default) {
960                 mutex_lock(&smu->mutex);
961                 *limit = smu->default_power_limit;
962                 if (smu->od_enabled) {
963                         *limit *= (100 + smu->smu_table.TDPODLimit);
964                         *limit /= 100;
965                 }
966                 mutex_unlock(&smu->mutex);
967         } else {
968                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
969                                                   POWER_SOURCE_AC << 16);
970                 if (ret) {
971                         pr_err("[%s] get PPT limit failed!", __func__);
972                         return ret;
973                 }
974                 smu_read_smc_arg(smu, limit);
975                 smu->power_limit = *limit;
976         }
977
978         return ret;
979 }
980
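/*
 * On the cached/default path the overdrive headroom is applied as a simple
 * percentage on top of the board default; with, say, a 300 W default and
 * TDPODLimit = 20 the reported limit becomes 300 * (100 + 20) / 100 = 360 W.
 * The firmware path instead queries the live PPT limit, with the power
 * source selector (AC here) packed into the upper 16 bits of the message
 * parameter.
 */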
981 static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
982 {
983         uint32_t max_power_limit;
984         int ret = 0;
985
986         if (n == 0)
987                 n = smu->default_power_limit;
988
989         max_power_limit = smu->default_power_limit;
990         if (smu->od_enabled) {
991                 max_power_limit *= (100 + smu->smu_table.TDPODLimit);
992                 max_power_limit /= 100;
993         }
994         if (n > max_power_limit)
995                 return -EINVAL;
996
997         if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
998                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
999         if (ret) {
1000                 pr_err("[%s] Set power limit Failed!", __func__);
1001                 return ret;
1002         }
1003         return ret;
1004 }
1005
1006 static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
1007 {
1008         int ret = 0;
1009         uint32_t freq;
1010
1011         if (clk_id >= PPCLK_COUNT || !value)
1012                 return -EINVAL;
1013
1014         ret = smu_send_smc_msg_with_param(smu,
1015                         SMU_MSG_GetDpmClockFreq, (clk_id << 16));
1016         if (ret)
1017                 return ret;
1018
1019         ret = smu_read_smc_arg(smu, &freq);
1020         if (ret)
1021                 return ret;
1022
1023         freq *= 100;
1024         *value = freq;
1025
1026         return ret;
1027 }
1028
1029 static int smu_v11_0_get_thermal_range(struct smu_context *smu,
1030                                 struct PP_TemperatureRange *range)
1031 {
1032         PPTable_t *pptable = smu->smu_table.driver_pptable;
1033         memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
1034
1035         range->max = pptable->TedgeLimit *
1036                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1037         range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
1038                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1039         range->hotspot_crit_max = pptable->ThotspotLimit *
1040                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1041         range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
1042                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1043         range->mem_crit_max = pptable->ThbmLimit *
1044                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1045         range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM) *
1046                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1047
1048         return 0;
1049 }
1050
1051 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1052                         struct PP_TemperatureRange *range)
1053 {
1054         struct amdgpu_device *adev = smu->adev;
1055         int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
1056                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1057         int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
1058                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1059         uint32_t val;
1060
1061         if (low < range->min)
1062                 low = range->min;
1063         if (high > range->max)
1064                 high = range->max;
1065
1066         if (low > high)
1067                 return -EINVAL;
1068
1069         val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
1070         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1071         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1072         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1073         val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1074         val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1075
1076         WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1077
1078         return 0;
1079 }
1080
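/*
 * The alert window is clamped to the pptable-derived range and programmed in
 * whole degrees Celsius: DIG_THERM_INTH/INTL take the high and low trip
 * points (hence the divide by PP_TEMPERATURE_UNITS_PER_CENTIGRADES, since
 * the range structure carries millidegrees), and clearing THERM_TRIGGER_MASK
 * leaves the thermal trigger interrupt unmasked.
 */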
1081 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1082 {
1083         struct amdgpu_device *adev = smu->adev;
1084         uint32_t val = 0;
1085
1086         val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1087         val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1088         val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1089
1090         WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1091
1092         return 0;
1093 }
1094
1095 static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
1096 {
1097         int ret;
1098         struct smu_table_context *table_context = &smu->smu_table;
1099         PPTable_t *pptable = table_context->driver_pptable;
1100
1101         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
1102                         (uint32_t)pptable->FanTargetTemperature);
1103
1104         return ret;
1105 }
1106
1107 static int smu_v11_0_start_thermal_control(struct smu_context *smu)
1108 {
1109         int ret = 0;
1110         struct PP_TemperatureRange range = {
1111                 TEMP_RANGE_MIN,
1112                 TEMP_RANGE_MAX,
1113                 TEMP_RANGE_MAX,
1114                 TEMP_RANGE_MIN,
1115                 TEMP_RANGE_MAX,
1116                 TEMP_RANGE_MAX,
1117                 TEMP_RANGE_MIN,
1118                 TEMP_RANGE_MAX,
1119                 TEMP_RANGE_MAX};
1120         struct amdgpu_device *adev = smu->adev;
1121
1122         if (!smu->pm_enabled)
1123                 return ret;
1124         smu_v11_0_get_thermal_range(smu, &range);
1125
1126         if (smu->smu_table.thermal_controller_type) {
1127                 ret = smu_v11_0_set_thermal_range(smu, &range);
1128                 if (ret)
1129                         return ret;
1130
1131                 ret = smu_v11_0_enable_thermal_alert(smu);
1132                 if (ret)
1133                         return ret;
1134                 ret = smu_v11_0_set_thermal_fan_table(smu);
1135                 if (ret)
1136                         return ret;
1137         }
1138
1139         adev->pm.dpm.thermal.min_temp = range.min;
1140         adev->pm.dpm.thermal.max_temp = range.max;
1141         adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
1142         adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
1143         adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
1144         adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
1145         adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
1146         adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
1147         adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
1148
1149         return ret;
1150 }
1151
1152 static int smu_v11_0_get_metrics_table(struct smu_context *smu,
1153                 SmuMetrics_t *metrics_table)
1154 {
1155         int ret = 0;
1156
1157         if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) {
1158                 ret = smu_update_table(smu, TABLE_SMU_METRICS,
1159                                 (void *)metrics_table, false);
1160                 if (ret) {
1161                         pr_info("Failed to export SMU metrics table!\n");
1162                         return ret;
1163                 }
1164                 memcpy(smu->metrics_table, metrics_table, sizeof(SmuMetrics_t));
1165                 smu->metrics_time = jiffies;
1166         } else {
1167                 memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t));
1168         }
1169         return ret;
1170 }
1171
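/*
 * Sensor reads funnel through this cached copy of the metrics table so that
 * a burst of hwmon queries does not cost one SMU table transfer per call:
 * the table is only re-fetched once the cached copy is older than HZ / 1000
 * jiffies.  Note that on kernels with HZ < 1000 that expression evaluates to
 * 0, so in practice the cache only coalesces reads landing in the same jiffy.
 */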
1172 static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
1173                                                   enum amd_pp_sensors sensor,
1174                                                   uint32_t *value)
1175 {
1176         int ret = 0;
1177         SmuMetrics_t metrics;
1178
1179         if (!value)
1180                 return -EINVAL;
1181
1182         ret = smu_v11_0_get_metrics_table(smu, &metrics);
1183         if (ret)
1184                 return ret;
1185
1186         switch (sensor) {
1187         case AMDGPU_PP_SENSOR_GPU_LOAD:
1188                 *value = metrics.AverageGfxActivity;
1189                 break;
1190         case AMDGPU_PP_SENSOR_MEM_LOAD:
1191                 *value = metrics.AverageUclkActivity;
1192                 break;
1193         default:
1194                 pr_err("Invalid sensor for retrieving clock activity\n");
1195                 return -EINVAL;
1196         }
1197
1198         return 0;
1199 }
1200
1201 static int smu_v11_0_thermal_get_temperature(struct smu_context *smu,
1202                                              enum amd_pp_sensors sensor,
1203                                              uint32_t *value)
1204 {
1205         struct amdgpu_device *adev = smu->adev;
1206         SmuMetrics_t metrics;
1207         uint32_t temp = 0;
1208         int ret = 0;
1209
1210         if (!value)
1211                 return -EINVAL;
1212
1213         ret = smu_v11_0_get_metrics_table(smu, &metrics);
1214         if (ret)
1215                 return ret;
1216
1217         switch (sensor) {
1218         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1219                 temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
1220                 temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
1221                                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
1222
1223                 temp = temp & 0x1ff;
1224                 temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
1225
1226                 *value = temp;
1227                 break;
1228         case AMDGPU_PP_SENSOR_EDGE_TEMP:
1229                 *value = metrics.TemperatureEdge *
1230                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1231                 break;
1232         case AMDGPU_PP_SENSOR_MEM_TEMP:
1233                 *value = metrics.TemperatureHBM *
1234                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1235                 break;
1236         default:
1237                 pr_err("Invalid sensor for retrieving temp\n");
1238                 return -EINVAL;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
1245 {
1246         int ret = 0;
1247         SmuMetrics_t metrics;
1248
1249         if (!value)
1250                 return -EINVAL;
1251
1252         ret = smu_v11_0_get_metrics_table(smu, &metrics);
1253         if (ret)
1254                 return ret;
1255
1256         *value = metrics.CurrSocketPower << 8;
1257
1258         return 0;
1259 }
1260
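/*
 * CurrSocketPower is reported by the metrics table as an integer number of
 * watts; shifting it left by 8 moves that integer into the upper bits of the
 * value, which is the fixed-point layout the amdgpu GPU_POWER sensor
 * consumers expect when converting the reading back to milli/microwatts.
 */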
1261 static uint16_t convert_to_vddc(uint8_t vid)
1262 {
1263         return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1264 }
1265
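/*
 * The SVI0 telemetry plane reports the core voltage as an 8-bit VID code.
 * The decode above is the usual SVI2 formula, voltage(mV) = 1550 - vid * 6.25,
 * written in integer math as (6200 - vid * 25) / SMU11_VOLTAGE_SCALE:
 *
 *	vid 0x00 -> 1550 mV, vid 100 -> 925 mV, vid 248 -> 0 mV
 */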
1266 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1267 {
1268         struct amdgpu_device *adev = smu->adev;
1269         uint32_t vdd = 0, val_vid = 0;
1270
1271         if (!value)
1272                 return -EINVAL;
1273         val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1274                 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1275                 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1276
1277         vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1278
1279         *value = vdd;
1280
1281         return 0;
1282
1283 }
1284
1285 static int smu_v11_0_read_sensor(struct smu_context *smu,
1286                                  enum amd_pp_sensors sensor,
1287                                  void *data, uint32_t *size)
1288 {
1289         struct smu_table_context *table_context = &smu->smu_table;
1290         PPTable_t *pptable = table_context->driver_pptable;
1291         int ret = 0;
1292         switch (sensor) {
1293         case AMDGPU_PP_SENSOR_GPU_LOAD:
1294         case AMDGPU_PP_SENSOR_MEM_LOAD:
1295                 ret = smu_v11_0_get_current_activity_percent(smu,
1296                                                              sensor,
1297                                                              (uint32_t *)data);
1298                 *size = 4;
1299                 break;
1300         case AMDGPU_PP_SENSOR_GFX_MCLK:
1301                 ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
1302                 *size = 4;
1303                 break;
1304         case AMDGPU_PP_SENSOR_GFX_SCLK:
1305                 ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
1306                 *size = 4;
1307                 break;
1308         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1309         case AMDGPU_PP_SENSOR_EDGE_TEMP:
1310         case AMDGPU_PP_SENSOR_MEM_TEMP:
1311                 ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data);
1312                 *size = 4;
1313                 break;
1314         case AMDGPU_PP_SENSOR_GPU_POWER:
1315                 ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
1316                 *size = 4;
1317                 break;
1318         case AMDGPU_PP_SENSOR_VDDGFX:
1319                 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1320                 *size = 4;
1321                 break;
1322         case AMDGPU_PP_SENSOR_UVD_POWER:
1323                 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
1324                 *size = 4;
1325                 break;
1326         case AMDGPU_PP_SENSOR_VCE_POWER:
1327                 *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
1328                 *size = 4;
1329                 break;
1330         case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1331                 *(uint32_t *)data = 0;
1332                 *size = 4;
1333                 break;
1334         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1335                 *(uint32_t *)data = pptable->FanMaximumRpm;
1336                 *size = 4;
1337                 break;
1338         default:
1339                 ret = smu_common_read_sensor(smu, sensor, data, size);
1340                 break;
1341         }
1342
1343         if (ret)
1344                 *size = 0;
1345
1346         return ret;
1347 }
1348
1349 static int
1350 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1351                                         struct pp_display_clock_request
1352                                         *clock_req)
1353 {
1354         enum amd_pp_clock_type clk_type = clock_req->clock_type;
1355         int ret = 0;
1356         PPCLK_e clk_select = 0;
1357         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1358
1359         if (!smu->pm_enabled)
1360                 return -EINVAL;
1361         if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
1362                 switch (clk_type) {
1363                 case amd_pp_dcef_clock:
1364                         clk_select = PPCLK_DCEFCLK;
1365                         break;
1366                 case amd_pp_disp_clock:
1367                         clk_select = PPCLK_DISPCLK;
1368                         break;
1369                 case amd_pp_pixel_clock:
1370                         clk_select = PPCLK_PIXCLK;
1371                         break;
1372                 case amd_pp_phy_clock:
1373                         clk_select = PPCLK_PHYCLK;
1374                         break;
1375                 default:
1376                         pr_info("[%s] Invalid Clock Type!", __func__);
1377                         ret = -EINVAL;
1378                         break;
1379                 }
1380
1381                 if (ret)
1382                         goto failed;
1383
1384                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1385                                                   (clk_select << 16) | clk_freq);
1386         }
1387
1388 failed:
1389         return ret;
1390 }
1391
1392 static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
1393                                           Watermarks_t *table, struct
1394                                           dm_pp_wm_sets_with_clock_ranges_soc15
1395                                           *clock_ranges)
1396 {
1397         int i;
1398
1399         if (!table || !clock_ranges)
1400                 return -EINVAL;
1401
1402         if (clock_ranges->num_wm_dmif_sets > 4 ||
1403             clock_ranges->num_wm_mcif_sets > 4)
1404                 return -EINVAL;
1405
1406         for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1407                 table->WatermarkRow[1][i].MinClock =
1408                         cpu_to_le16((uint16_t)
1409                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1410                         1000));
1411                 table->WatermarkRow[1][i].MaxClock =
1412                         cpu_to_le16((uint16_t)
1413                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1414                         1000));
1415                 table->WatermarkRow[1][i].MinUclk =
1416                         cpu_to_le16((uint16_t)
1417                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1418                         1000));
1419                 table->WatermarkRow[1][i].MaxUclk =
1420                         cpu_to_le16((uint16_t)
1421                         (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1422                         1000));
1423                 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1424                                 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
1425         }
1426
1427         for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1428                 table->WatermarkRow[0][i].MinClock =
1429                         cpu_to_le16((uint16_t)
1430                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1431                         1000));
1432                 table->WatermarkRow[0][i].MaxClock =
1433                         cpu_to_le16((uint16_t)
1434                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1435                         1000));
1436                 table->WatermarkRow[0][i].MinUclk =
1437                         cpu_to_le16((uint16_t)
1438                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1439                         1000));
1440                 table->WatermarkRow[0][i].MaxUclk =
1441                         cpu_to_le16((uint16_t)
1442                         (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1443                         1000));
1444                 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1445                                 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
1446         }
1447
1448         return 0;
1449 }
1450
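/*
 * Refresh the cached watermarks table from the given clock ranges and flag
 * it as present (WATERMARKS_EXIST) but not yet loaded into the SMU, provided
 * DCEFCLK and SOCCLK DPM are enabled.
 */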
1451 static int
1452 smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1453                                           dm_pp_wm_sets_with_clock_ranges_soc15
1454                                           *clock_ranges)
1455 {
1456         int ret = 0;
1457         struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
1458         Watermarks_t *table = watermarks->cpu_addr;
1459
1460         if (!smu->disable_watermark &&
1461             smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
1462             smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
1463                 ret = smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
1464                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1465                 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1466         }
1467
1468         return ret;
1469 }
1470
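/*
 * Query the SMU for the minimum or maximum DPM frequency of the selected
 * clock domain; the PPCLK_e selector is passed in the upper 16 bits of the
 * message argument.
 */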
1471 static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
1472                                       uint32_t *clock,
1473                                       PPCLK_e clock_select,
1474                                       bool max)
1475 {
1476         int ret;
1477         *clock = 0;
1478         if (max) {
1479                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
1480                                             (clock_select << 16));
1481                 if (ret) {
1482                         pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
1483                         return ret;
1484                 }
1485                 smu_read_smc_arg(smu, clock);
1486         } else {
1487                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
1488                                             (clock_select << 16));
1489                 if (ret) {
1490                         pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
1491                         return ret;
1492                 }
1493                 smu_read_smc_arg(smu, clock);
1494         }
1495
1496         return 0;
1497 }
1498
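/* Return the min (low) or max gfxclk DPM frequency, multiplied by 100. */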
1499 static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
1500 {
1501         uint32_t gfx_clk;
1502         int ret;
1503
1504         if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
1505                 pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
1506                 return -EPERM;
1507         }
1508
1509         if (low) {
1510                 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
1511                 if (ret) {
1512                         pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
1513                         return ret;
1514                 }
1515         } else {
1516                 ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
1517                 if (ret) {
1518                         pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
1519                         return ret;
1520                 }
1521         }
1522
1523         return (gfx_clk * 100);
1524 }
1525
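/* Return the min (low) or max UCLK DPM frequency, multiplied by 100. */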
1526 static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
1527 {
1528         uint32_t mem_clk;
1529         int ret;
1530
1531         if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
1532                 pr_err("[GetMclks]: memclk dpm not enabled!\n");
1533                 return -EPERM;
1534         }
1535
1536         if (low) {
1537                 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
1538                 if (ret) {
1539                         pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
1540                         return ret;
1541                 }
1542         } else {
1543                 ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, true);
1544                 if (ret) {
1545                         pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
1546                         return ret;
1547                 }
1548         }
1549
1550         return (mem_clk * 100);
1551 }
1552
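/*
 * On first call (initialize == true) allocate the overdrive table, fetch its
 * current contents from the SMU and derive the default OD8 settings; the
 * table is then written back to the SMU.
 */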
1553 static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
1554                                               bool initialize)
1555 {
1556         struct smu_table_context *table_context = &smu->smu_table;
1557         int ret;
1558
1559         if (initialize) {
1560                 if (table_context->overdrive_table)
1561                         return -EINVAL;
1562
1563                 table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
1564
1565                 if (!table_context->overdrive_table)
1566                         return -ENOMEM;
1567
1568                 ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
1569                 if (ret) {
1570                         pr_err("Failed to export overdrive table!\n");
1571                         return ret;
1572                 }
1573
1574                 smu_set_default_od8_settings(smu);
1575         }
1576
1577         ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
1578         if (ret) {
1579                 pr_err("Failed to import overdrive table!\n");
1580                 return ret;
1581         }
1582
1583         return 0;
1584 }
1585
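/* Map a PP_SMC_POWER_PROFILE_* index to the matching WORKLOAD_*_BIT value. */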
1586 static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
1587 {
1588         int pplib_workload = 0;
1589
1590         switch (power_profile) {
1591         case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
1592              pplib_workload = WORKLOAD_DEFAULT_BIT;
1593              break;
1594         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
1595              pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
1596              break;
1597         case PP_SMC_POWER_PROFILE_POWERSAVING:
1598              pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
1599              break;
1600         case PP_SMC_POWER_PROFILE_VIDEO:
1601              pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
1602              break;
1603         case PP_SMC_POWER_PROFILE_VR:
1604              pplib_workload = WORKLOAD_PPLIB_VR_BIT;
1605              break;
1606         case PP_SMC_POWER_PROFILE_COMPUTE:
1607              pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
1608              break;
1609         case PP_SMC_POWER_PROFILE_CUSTOM:
1610                 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
1611                 break;
1612         }
1613
1614         return pplib_workload;
1615 }
1616
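/*
 * Dump the activity-monitor coefficients for every power profile (one row
 * each for GFXCLK, SOCCLK, UCLK and FCLK) into @buf, marking the currently
 * selected profile with '*'; returns the number of bytes written.
 */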
1617 static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
1618 {
1619         DpmActivityMonitorCoeffInt_t activity_monitor;
1620         uint32_t i, size = 0;
1621         uint16_t workload_type = 0;
1622         static const char *profile_name[] = {
1623                                         "BOOTUP_DEFAULT",
1624                                         "3D_FULL_SCREEN",
1625                                         "POWER_SAVING",
1626                                         "VIDEO",
1627                                         "VR",
1628                                         "COMPUTE",
1629                                         "CUSTOM"};
1630         static const char *title[] = {
1631                         "PROFILE_INDEX(NAME)",
1632                         "CLOCK_TYPE(NAME)",
1633                         "FPS",
1634                         "UseRlcBusy",
1635                         "MinActiveFreqType",
1636                         "MinActiveFreq",
1637                         "BoosterFreqType",
1638                         "BoosterFreq",
1639                         "PD_Data_limit_c",
1640                         "PD_Data_error_coeff",
1641                         "PD_Data_error_rate_coeff"};
1642         int result = 0;
1643
1644         if (!smu->pm_enabled || !buf)
1645                 return -EINVAL;
1646
1647         size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
1648                         title[0], title[1], title[2], title[3], title[4], title[5],
1649                         title[6], title[7], title[8], title[9], title[10]);
1650
1651         for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1652                 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1653                 workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
1654                 result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1655                                                    workload_type, &activity_monitor, false);
1656                 if (result) {
1657                         pr_err("[%s] Failed to get activity monitor!\n", __func__);
1658                         return result;
1659                 }
1660
1661                 size += sprintf(buf + size, "%2d %14s%s:\n",
1662                         i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1663
1664                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1665                         " ",
1666                         0,
1667                         "GFXCLK",
1668                         activity_monitor.Gfx_FPS,
1669                         activity_monitor.Gfx_UseRlcBusy,
1670                         activity_monitor.Gfx_MinActiveFreqType,
1671                         activity_monitor.Gfx_MinActiveFreq,
1672                         activity_monitor.Gfx_BoosterFreqType,
1673                         activity_monitor.Gfx_BoosterFreq,
1674                         activity_monitor.Gfx_PD_Data_limit_c,
1675                         activity_monitor.Gfx_PD_Data_error_coeff,
1676                         activity_monitor.Gfx_PD_Data_error_rate_coeff);
1677
1678                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1679                         " ",
1680                         1,
1681                         "SOCCLK",
1682                         activity_monitor.Soc_FPS,
1683                         activity_monitor.Soc_UseRlcBusy,
1684                         activity_monitor.Soc_MinActiveFreqType,
1685                         activity_monitor.Soc_MinActiveFreq,
1686                         activity_monitor.Soc_BoosterFreqType,
1687                         activity_monitor.Soc_BoosterFreq,
1688                         activity_monitor.Soc_PD_Data_limit_c,
1689                         activity_monitor.Soc_PD_Data_error_coeff,
1690                         activity_monitor.Soc_PD_Data_error_rate_coeff);
1691
1692                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1693                         " ",
1694                         2,
1695                         "UCLK",
1696                         activity_monitor.Mem_FPS,
1697                         activity_monitor.Mem_UseRlcBusy,
1698                         activity_monitor.Mem_MinActiveFreqType,
1699                         activity_monitor.Mem_MinActiveFreq,
1700                         activity_monitor.Mem_BoosterFreqType,
1701                         activity_monitor.Mem_BoosterFreq,
1702                         activity_monitor.Mem_PD_Data_limit_c,
1703                         activity_monitor.Mem_PD_Data_error_coeff,
1704                         activity_monitor.Mem_PD_Data_error_rate_coeff);
1705
1706                 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
1707                         " ",
1708                         3,
1709                         "FCLK",
1710                         activity_monitor.Fclk_FPS,
1711                         activity_monitor.Fclk_UseRlcBusy,
1712                         activity_monitor.Fclk_MinActiveFreqType,
1713                         activity_monitor.Fclk_MinActiveFreq,
1714                         activity_monitor.Fclk_BoosterFreqType,
1715                         activity_monitor.Fclk_BoosterFreq,
1716                         activity_monitor.Fclk_PD_Data_limit_c,
1717                         activity_monitor.Fclk_PD_Data_error_coeff,
1718                         activity_monitor.Fclk_PD_Data_error_rate_coeff);
1719         }
1720
1721         return size;
1722 }
1723
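/*
 * Select a power profile mode.  For PP_SMC_POWER_PROFILE_CUSTOM the
 * caller-supplied coefficients in @input are first written into the
 * activity-monitor table; the matching workload mask is then sent to
 * the SMU via SMU_MSG_SetWorkloadMask.
 */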
1724 static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1725 {
1726         DpmActivityMonitorCoeffInt_t activity_monitor;
1727         int workload_type = 0, ret = 0;
1728
1729         if (!smu->pm_enabled)
1730                 return ret;
1731
1732         if (input[size] > PP_SMC_POWER_PROFILE_CUSTOM) {
1733                 pr_err("Invalid power profile mode %ld\n", input[size]);
1734                 return -EINVAL;
1735         }
1736         smu->power_profile_mode = input[size];
1737
1738         if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1739                 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1740                                                 WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
1741                 if (ret) {
1742                         pr_err("[%s] Failed to get activity monitor!\n", __func__);
1743                         return ret;
1744                 }
1745
1746                 switch (input[0]) {
1747                 case 0: /* Gfxclk */
1748                         activity_monitor.Gfx_FPS = input[1];
1749                         activity_monitor.Gfx_UseRlcBusy = input[2];
1750                         activity_monitor.Gfx_MinActiveFreqType = input[3];
1751                         activity_monitor.Gfx_MinActiveFreq = input[4];
1752                         activity_monitor.Gfx_BoosterFreqType = input[5];
1753                         activity_monitor.Gfx_BoosterFreq = input[6];
1754                         activity_monitor.Gfx_PD_Data_limit_c = input[7];
1755                         activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1756                         activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1757                         break;
1758                 case 1: /* Socclk */
1759                         activity_monitor.Soc_FPS = input[1];
1760                         activity_monitor.Soc_UseRlcBusy = input[2];
1761                         activity_monitor.Soc_MinActiveFreqType = input[3];
1762                         activity_monitor.Soc_MinActiveFreq = input[4];
1763                         activity_monitor.Soc_BoosterFreqType = input[5];
1764                         activity_monitor.Soc_BoosterFreq = input[6];
1765                         activity_monitor.Soc_PD_Data_limit_c = input[7];
1766                         activity_monitor.Soc_PD_Data_error_coeff = input[8];
1767                         activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1768                         break;
1769                 case 2: /* Uclk */
1770                         activity_monitor.Mem_FPS = input[1];
1771                         activity_monitor.Mem_UseRlcBusy = input[2];
1772                         activity_monitor.Mem_MinActiveFreqType = input[3];
1773                         activity_monitor.Mem_MinActiveFreq = input[4];
1774                         activity_monitor.Mem_BoosterFreqType = input[5];
1775                         activity_monitor.Mem_BoosterFreq = input[6];
1776                         activity_monitor.Mem_PD_Data_limit_c = input[7];
1777                         activity_monitor.Mem_PD_Data_error_coeff = input[8];
1778                         activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1779                         break;
1780                 case 3: /* Fclk */
1781                         activity_monitor.Fclk_FPS = input[1];
1782                         activity_monitor.Fclk_UseRlcBusy = input[2];
1783                         activity_monitor.Fclk_MinActiveFreqType = input[3];
1784                         activity_monitor.Fclk_MinActiveFreq = input[4];
1785                         activity_monitor.Fclk_BoosterFreqType = input[5];
1786                         activity_monitor.Fclk_BoosterFreq = input[6];
1787                         activity_monitor.Fclk_PD_Data_limit_c = input[7];
1788                         activity_monitor.Fclk_PD_Data_error_coeff = input[8];
1789                         activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
1790                         break;
1791                 }
1792
1793                 ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
1794                                                 WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, true);
1795                 if (ret) {
1796                         pr_err("[%s] Failed to set activity monitor!\n", __func__);
1797                         return ret;
1798                 }
1799         }
1800
1801         /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1802         workload_type =
1803                 smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
1804         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1805                                     1 << workload_type);
1806
1807         return ret;
1808 }
1809
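/*
 * Read the overdrive table from the SMU, update a single OD8 setting and
 * write the table back.
 */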
1810 static int smu_v11_0_update_od8_settings(struct smu_context *smu,
1811                                         uint32_t index,
1812                                         uint32_t value)
1813 {
1814         struct smu_table_context *table_context = &smu->smu_table;
1815         int ret;
1816
1817         ret = smu_update_table(smu, TABLE_OVERDRIVE,
1818                                table_context->overdrive_table, false);
1819         if (ret) {
1820                 pr_err("Failed to export overdrive table!\n");
1821                 return ret;
1822         }
1823
1824         smu_update_specified_od8_value(smu, index, value);
1825
1826         ret = smu_update_table(smu, TABLE_OVERDRIVE,
1827                                table_context->overdrive_table, true);
1828         if (ret) {
1829                 pr_err("Failed to import overdrive table!\n");
1830                 return ret;
1831         }
1832
1833         return 0;
1834 }
1835
1836 static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
1837 {
1838         if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
1839                 return 0;
1840
1841         if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
1842                 return 0;
1843
1844         return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
1845 }
1846
1847 static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
1848 {
1849         if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
1850                 return 0;
1851
1852         if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
1853                 return 0;
1854
1855         return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
1856 }
1857
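/* Read the current fan speed (RPM) reported by the SMU. */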
1858 static int smu_v11_0_get_current_rpm(struct smu_context *smu,
1859                                      uint32_t *current_rpm)
1860 {
1861         int ret;
1862
1863         ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
1864
1865         if (ret) {
1866                 pr_err("Attempt to get current RPM from SMC Failed!\n");
1867                 return ret;
1868         }
1869
1870         smu_read_smc_arg(smu, current_rpm);
1871
1872         return 0;
1873 }
1874
1875 static uint32_t
1876 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1877 {
1878         if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
1879                 return AMD_FAN_CTRL_MANUAL;
1880         else
1881                 return AMD_FAN_CTRL_AUTO;
1882 }
1883
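/*
 * Express the current fan RPM as a percentage of the pptable's
 * FanMaximumRpm, capped at 100.
 */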
1884 static int
1885 smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
1886                                            uint32_t *speed)
1887 {
1888         int ret = 0;
1889         uint32_t percent = 0;
1890         uint32_t current_rpm;
1891         PPTable_t *pptable = smu->smu_table.driver_pptable;
1892
1893         ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
1894         percent = current_rpm * 100 / pptable->FanMaximumRpm;
1895         *speed = percent > 100 ? 100 : percent;
1896
1897         return ret;
1898 }
1899
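/* Start or stop SMU-managed (automatic) fan control, if supported. */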
1900 static int
1901 smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
1902 {
1903         int ret = 0;
1904
1905         if (!smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
1906                 return 0;
1907
1908         ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
1909         if (ret)
1910                 pr_err("[%s] %s smc FAN CONTROL feature failed!\n",
1911                        __func__, (start ? "Start" : "Stop"));
1912
1913         return ret;
1914 }
1915
1916 static int
1917 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1918 {
1919         struct amdgpu_device *adev = smu->adev;
1920
1921         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1922                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1923                                    CG_FDO_CTRL2, TMIN, 0));
1924         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
1925                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
1926                                    CG_FDO_CTRL2, FDO_PWM_MODE, mode));
1927
1928         return 0;
1929 }
1930
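/*
 * Take over fan control from the SMU and program a static PWM duty cycle
 * corresponding to @speed percent of FMAX_DUTY100.
 */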
1931 static int
1932 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1933 {
1934         struct amdgpu_device *adev = smu->adev;
1935         uint32_t duty100;
1936         uint32_t duty;
1937         uint64_t tmp64;
1938         bool stop = false;
1939
1940         if (speed > 100)
1941                 speed = 100;
1942
1943         if (smu_v11_0_smc_fan_control(smu, stop))
1944                 return -EINVAL;
1945         duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1946                                 CG_FDO_CTRL1, FMAX_DUTY100);
1947         if (!duty100)
1948                 return -EINVAL;
1949
1950         tmp64 = (uint64_t)speed * duty100;
1951         do_div(tmp64, 100);
1952         duty = (uint32_t)tmp64;
1953
1954         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1955                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1956                                    CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1957
1958         return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1959 }
1960
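/*
 * Switch the fan between full speed (AMD_FAN_CTRL_NONE), manual and
 * SMU-automatic control.
 */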
1961 static int
1962 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1963                                uint32_t mode)
1964 {
1965         int ret = 0;
1966         bool start = true;
1967         bool stop  = false;
1968
1969         switch (mode) {
1970         case AMD_FAN_CTRL_NONE:
1971                 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1972                 break;
1973         case AMD_FAN_CTRL_MANUAL:
1974                 ret = smu_v11_0_smc_fan_control(smu, stop);
1975                 break;
1976         case AMD_FAN_CTRL_AUTO:
1977                 ret = smu_v11_0_smc_fan_control(smu, start);
1978                 break;
1979         default:
1980                 break;
1981         }
1982
1983         if (ret) {
1984                 pr_err("[%s] Set fan control mode failed!\n", __func__);
1985                 return -EINVAL;
1986         }
1987
1988         return ret;
1989 }
1990
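/*
 * Stop SMU fan control and program the tachometer target period derived
 * from the reference clock (xclk) for the requested RPM, then put the FDO
 * into static-RPM mode.
 */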
1991 static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1992                                        uint32_t speed)
1993 {
1994         struct amdgpu_device *adev = smu->adev;
1995         int ret;
1996         uint32_t tach_period, crystal_clock_freq;
1997         bool stop = false;
1998
1999         if (!speed)
2000                 return -EINVAL;
2001
2002         mutex_lock(&(smu->mutex));
2003         ret = smu_v11_0_smc_fan_control(smu, stop);
2004         if (ret)
2005                 goto set_fan_speed_rpm_failed;
2006
2007         crystal_clock_freq = amdgpu_asic_get_xclk(adev);
2008         tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
2009         WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
2010                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
2011                                    CG_TACH_CTRL, TARGET_PERIOD,
2012                                    tach_period));
2013
2014         ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
2015
2016 set_fan_speed_rpm_failed:
2017         mutex_unlock(&(smu->mutex));
2018         return ret;
2019 }
2020
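/* Set the XGMI link to D0 for a non-zero pstate, D3 otherwise. */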
2021 static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
2022                                      uint32_t pstate)
2023 {
2024         int ret = 0;
2025         mutex_lock(&(smu->mutex));
2026         ret = smu_send_smc_msg_with_param(smu,
2027                                           SMU_MSG_SetXgmiMode,
2028                                           pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
2029         mutex_unlock(&(smu->mutex));
2030         return ret;
2031 }
2032
2033 static const struct smu_funcs smu_v11_0_funcs = {
2034         .init_microcode = smu_v11_0_init_microcode,
2035         .load_microcode = smu_v11_0_load_microcode,
2036         .check_fw_status = smu_v11_0_check_fw_status,
2037         .check_fw_version = smu_v11_0_check_fw_version,
2038         .send_smc_msg = smu_v11_0_send_msg,
2039         .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
2040         .read_smc_arg = smu_v11_0_read_arg,
2041         .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
2042         .init_smc_tables = smu_v11_0_init_smc_tables,
2043         .fini_smc_tables = smu_v11_0_fini_smc_tables,
2044         .init_power = smu_v11_0_init_power,
2045         .fini_power = smu_v11_0_fini_power,
2046         .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2047         .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
2048         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2049         .check_pptable = smu_v11_0_check_pptable,
2050         .parse_pptable = smu_v11_0_parse_pptable,
2051         .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
2052         .write_pptable = smu_v11_0_write_pptable,
2053         .write_watermarks_table = smu_v11_0_write_watermarks_table,
2054         .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
2055         .set_tool_table_location = smu_v11_0_set_tool_table_location,
2056         .init_display = smu_v11_0_init_display,
2057         .set_allowed_mask = smu_v11_0_set_allowed_mask,
2058         .get_enabled_mask = smu_v11_0_get_enabled_mask,
2059         .is_dpm_running = smu_v11_0_is_dpm_running,
2060         .system_features_control = smu_v11_0_system_features_control,
2061         .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
2062         .notify_display_change = smu_v11_0_notify_display_change,
2063         .get_power_limit = smu_v11_0_get_power_limit,
2064         .set_power_limit = smu_v11_0_set_power_limit,
2065         .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
2066         .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
2067         .start_thermal_control = smu_v11_0_start_thermal_control,
2068         .read_sensor = smu_v11_0_read_sensor,
2069         .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
2070         .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2071         .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
2072         .get_sclk = smu_v11_0_dpm_get_sclk,
2073         .get_mclk = smu_v11_0_dpm_get_mclk,
2074         .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
2075         .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
2076         .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
2077         .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
2078         .update_od8_settings = smu_v11_0_update_od8_settings,
2079         .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
2080         .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
2081         .get_current_rpm = smu_v11_0_get_current_rpm,
2082         .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2083         .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2084         .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
2085         .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
2086         .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
2087         .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
2088 };
2089
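/* Install the common smu11 callbacks and the ASIC-specific ppt functions. */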
2090 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
2091 {
2092         struct amdgpu_device *adev = smu->adev;
2093
2094         smu->funcs = &smu_v11_0_funcs;
2095         switch (adev->asic_type) {
2096         case CHIP_VEGA20:
2097                 vega20_set_ppt_funcs(smu);
2098                 break;
2099         default:
2100                 pr_warn("Unknown asic for smu11\n");
2101         }
2102 }