/*
 * drivers/gpu/drm/amd/powerplay/smu_v12_0.c
 *
 * drm/amd/powerplay: split out those internal used swSMU APIs V2
 */
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_internal.h"
28 #include "atomfirmware.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "smu_v12_0.h"
31 #include "soc15_common.h"
32 #include "atom.h"
33 #include "renoir_ppt.h"
34
35 #include "asic_reg/mp/mp_12_0_0_offset.h"
36 #include "asic_reg/mp/mp_12_0_0_sh_mask.h"
37
38 #define smnMP1_FIRMWARE_FLAGS                                0x3010024
39
40 #define mmSMUIO_GFX_MISC_CNTL                                0x00c8
41 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
42 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
43 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1
44
45 static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
46                                               uint16_t msg)
47 {
48         struct amdgpu_device *adev = smu->adev;
49
50         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
51         return 0;
52 }
53
54 static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
55 {
56         struct amdgpu_device *adev = smu->adev;
57
58         *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
59         return 0;
60 }
61
62 static int smu_v12_0_wait_for_response(struct smu_context *smu)
63 {
64         struct amdgpu_device *adev = smu->adev;
65         uint32_t cur_value, i;
66
67         for (i = 0; i < adev->usec_timeout; i++) {
68                 cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
69                 if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
70                         break;
71                 udelay(1);
72         }
73
74         /* timeout means wrong logic */
75         if (i == adev->usec_timeout)
76                 return -ETIME;
77
78         return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
79 }
80
81 static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
82 {
83         struct amdgpu_device *adev = smu->adev;
84         int ret = 0, index = 0;
85
86         index = smu_msg_get_index(smu, msg);
87         if (index < 0)
88                 return index;
89
90         smu_v12_0_wait_for_response(smu);
91
92         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
93
94         smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
95
96         ret = smu_v12_0_wait_for_response(smu);
97
98         if (ret)
99                 pr_err("Failed to send message 0x%x, response 0x%x\n", index,
100                        ret);
101
102         return ret;
103
104 }
105
106 static int
107 smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
108                               uint32_t param)
109 {
110         struct amdgpu_device *adev = smu->adev;
111         int ret = 0, index = 0;
112
113         index = smu_msg_get_index(smu, msg);
114         if (index < 0)
115                 return index;
116
117         ret = smu_v12_0_wait_for_response(smu);
118         if (ret)
119                 pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
120                        index, ret, param);
121
122         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
123
124         WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
125
126         smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
127
128         ret = smu_v12_0_wait_for_response(smu);
129         if (ret)
130                 pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
131                        index, ret, param);
132
133         return ret;
134 }
135
136 static int smu_v12_0_check_fw_status(struct smu_context *smu)
137 {
138         struct amdgpu_device *adev = smu->adev;
139         uint32_t mp1_fw_flags;
140
141         mp1_fw_flags = RREG32_PCIE(MP1_Public |
142                 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
143
144         if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
145                 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
146                 return 0;
147
148         return -EIO;
149 }
150
151 static int smu_v12_0_check_fw_version(struct smu_context *smu)
152 {
153         uint32_t if_version = 0xff, smu_version = 0xff;
154         uint16_t smu_major;
155         uint8_t smu_minor, smu_debug;
156         int ret = 0;
157
158         ret = smu_get_smc_version(smu, &if_version, &smu_version);
159         if (ret)
160                 return ret;
161
162         smu_major = (smu_version >> 16) & 0xffff;
163         smu_minor = (smu_version >> 8) & 0xff;
164         smu_debug = (smu_version >> 0) & 0xff;
165
166         /*
167          * 1. if_version mismatch is not critical as our fw is designed
168          * to be backward compatible.
169          * 2. New fw usually brings some optimizations. But that's visible
170          * only on the paired driver.
171          * Considering above, we just leave user a warning message instead
172          * of halt driver loading.
173          */
174         if (if_version != smu->smc_if_version) {
175                 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
176                         "smu fw version = 0x%08x (%d.%d.%d)\n",
177                         smu->smc_if_version, if_version,
178                         smu_version, smu_major, smu_minor, smu_debug);
179                 pr_warn("SMU driver if version not matched\n");
180         }
181
182         return ret;
183 }
184
185 static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
186 {
187         if (!(smu->adev->flags & AMD_IS_APU))
188                 return 0;
189
190         if (gate)
191                 return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
192         else
193                 return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
194 }
195
196 static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
197 {
198         if (!(smu->adev->flags & AMD_IS_APU))
199                 return 0;
200
201         if (gate)
202                 return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
203         else
204                 return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
205 }
206
207 static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
208 {
209         if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
210                 return 0;
211
212         return smu_v12_0_send_msg_with_param(smu,
213                 SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
214 }
215
216 /**
217  * smu_v12_0_get_gfxoff_status - get gfxoff status
218  *
219  * @smu: amdgpu_device pointer
220  *
221  * This function will be used to get gfxoff status
222  *
223  * Returns 0=GFXOFF(default).
224  * Returns 1=Transition out of GFX State.
225  * Returns 2=Not in GFXOFF.
226  * Returns 3=Transition into GFXOFF.
227  */
228 static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
229 {
230         uint32_t reg;
231         uint32_t gfxOff_Status = 0;
232         struct amdgpu_device *adev = smu->adev;
233
234         reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
235         gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
236                 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
237
238         return gfxOff_Status;
239 }
240
241 static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
242 {
243         int ret = 0, timeout = 500;
244
245         if (enable) {
246                 ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
247
248         } else {
249                 ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
250
251                 /* confirm gfx is back to "on" state, timeout is 0.5 second */
252                 while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
253                         msleep(1);
254                         timeout--;
255                         if (timeout == 0) {
256                                 DRM_ERROR("disable gfxoff timeout and failed!\n");
257                                 break;
258                         }
259                 }
260         }
261
262         return ret;
263 }
264
265 static int smu_v12_0_init_smc_tables(struct smu_context *smu)
266 {
267         struct smu_table_context *smu_table = &smu->smu_table;
268         struct smu_table *tables = NULL;
269
270         if (smu_table->tables)
271                 return -EINVAL;
272
273         tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
274                          GFP_KERNEL);
275         if (!tables)
276                 return -ENOMEM;
277
278         smu_table->tables = tables;
279
280         return smu_tables_init(smu, tables);
281 }
282
283 static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
284 {
285         struct smu_table_context *smu_table = &smu->smu_table;
286
287         if (!smu_table->tables)
288                 return -EINVAL;
289
290         kfree(smu_table->clocks_table);
291         kfree(smu_table->tables);
292
293         smu_table->clocks_table = NULL;
294         smu_table->tables = NULL;
295
296         return 0;
297 }
298
299 static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
300 {
301         struct smu_table_context *smu_table = &smu->smu_table;
302         struct smu_table *table = NULL;
303
304         table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
305         if (!table)
306                 return -EINVAL;
307
308         if (!table->cpu_addr)
309                 return -EINVAL;
310
311         return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
312 }
313
314 static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
315                                                  uint32_t *min, uint32_t *max)
316 {
317         int ret = 0;
318         uint32_t mclk_mask, soc_mask;
319
320         if (max) {
321                 ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
322                                                  NULL,
323                                                  &mclk_mask,
324                                                  &soc_mask);
325                 if (ret)
326                         goto failed;
327
328                 switch (clk_type) {
329                 case SMU_GFXCLK:
330                 case SMU_SCLK:
331                         ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
332                         if (ret) {
333                                 pr_err("Attempt to get max GX frequency from SMC Failed !\n");
334                                 goto failed;
335                         }
336                         ret = smu_read_smc_arg(smu, max);
337                         if (ret)
338                                 goto failed;
339                         break;
340                 case SMU_UCLK:
341                 case SMU_FCLK:
342                 case SMU_MCLK:
343                         ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
344                         if (ret)
345                                 goto failed;
346                         break;
347                 case SMU_SOCCLK:
348                         ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
349                         if (ret)
350                                 goto failed;
351                         break;
352                 default:
353                         ret = -EINVAL;
354                         goto failed;
355                 }
356         }
357
358         if (min) {
359                 switch (clk_type) {
360                 case SMU_GFXCLK:
361                 case SMU_SCLK:
362                         ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
363                         if (ret) {
364                                 pr_err("Attempt to get min GX frequency from SMC Failed !\n");
365                                 goto failed;
366                         }
367                         ret = smu_read_smc_arg(smu, min);
368                         if (ret)
369                                 goto failed;
370                         break;
371                 case SMU_UCLK:
372                 case SMU_FCLK:
373                 case SMU_MCLK:
374                         ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
375                         if (ret)
376                                 goto failed;
377                         break;
378                 case SMU_SOCCLK:
379                         ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
380                         if (ret)
381                                 goto failed;
382                         break;
383                 default:
384                         ret = -EINVAL;
385                         goto failed;
386                 }
387         }
388 failed:
389         return ret;
390 }
391
392 static int smu_v12_0_mode2_reset(struct smu_context *smu){
393         return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
394 }
395
396 static int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
397                             uint32_t min, uint32_t max)
398 {
399         int ret = 0;
400
401         if (max < min)
402                 return -EINVAL;
403
404         switch (clk_type) {
405         case SMU_GFXCLK:
406         case SMU_SCLK:
407                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
408                 if (ret)
409                         return ret;
410
411                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
412                 if (ret)
413                         return ret;
414         break;
415         case SMU_FCLK:
416         case SMU_MCLK:
417                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
418                 if (ret)
419                         return ret;
420
421                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
422                 if (ret)
423                         return ret;
424         break;
425         case SMU_SOCCLK:
426                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
427                 if (ret)
428                         return ret;
429
430                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
431                 if (ret)
432                         return ret;
433         break;
434         case SMU_VCLK:
435                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
436                 if (ret)
437                         return ret;
438
439                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
440                 if (ret)
441                         return ret;
442         break;
443         default:
444                 return -EINVAL;
445         }
446
447         return ret;
448 }
449
450 static const struct smu_funcs smu_v12_0_funcs = {
451         .check_fw_status = smu_v12_0_check_fw_status,
452         .check_fw_version = smu_v12_0_check_fw_version,
453         .powergate_sdma = smu_v12_0_powergate_sdma,
454         .powergate_vcn = smu_v12_0_powergate_vcn,
455         .send_smc_msg = smu_v12_0_send_msg,
456         .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
457         .read_smc_arg = smu_v12_0_read_arg,
458         .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
459         .gfx_off_control = smu_v12_0_gfx_off_control,
460         .init_smc_tables = smu_v12_0_init_smc_tables,
461         .fini_smc_tables = smu_v12_0_fini_smc_tables,
462         .populate_smc_tables = smu_v12_0_populate_smc_tables,
463         .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
464         .mode2_reset = smu_v12_0_mode2_reset,
465         .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
466 };
467
468 void smu_v12_0_set_smu_funcs(struct smu_context *smu)
469 {
470         struct amdgpu_device *adev = smu->adev;
471
472         smu->funcs = &smu_v12_0_funcs;
473
474         switch (adev->asic_type) {
475         case CHIP_RENOIR:
476                 renoir_set_ppt_funcs(smu);
477                 break;
478         default:
479                 pr_warn("Unknown asic for smu12\n");
480         }
481 }