[linux.git] drivers/gpu/drm/amd/powerplay/amdgpu_smu.c (blob dd06d23bb1885ba2c78d4a9786304d32cbe6af88)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include "pp_debug.h"
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "soc15_common.h"
29 #include "smu_v11_0.h"
30 #include "atom.h"
31 #include "amd_pcie.h"
32
33 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
34 {
35         int ret = 0;
36
37         if (!if_version && !smu_version)
38                 return -EINVAL;
39
40         if (if_version) {
41                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
42                 if (ret)
43                         return ret;
44
45                 ret = smu_read_smc_arg(smu, if_version);
46                 if (ret)
47                         return ret;
48         }
49
50         if (smu_version) {
51                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
52                 if (ret)
53                         return ret;
54
55                 ret = smu_read_smc_arg(smu, smu_version);
56                 if (ret)
57                         return ret;
58         }
59
60         return ret;
61 }
62
63 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
64                             uint32_t min, uint32_t max)
65 {
66         int ret = 0, clk_id = 0;
67         uint32_t param;
68
69         if (min <= 0 && max <= 0)
70                 return -EINVAL;
71
72         clk_id = smu_clk_get_index(smu, clk_type);
73         if (clk_id < 0)
74                 return clk_id;
75
76         if (max > 0) {
77                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
78                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
79                                                   param);
80                 if (ret)
81                         return ret;
82         }
83
84         if (min > 0) {
85                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
86                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
87                                                   param);
88                 if (ret)
89                         return ret;
90         }
91
92
93         return ret;
94 }
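
/*
 * Illustrative note (a minimal sketch, not part of the original source; the
 * clock index value below is hypothetical): the soft min/max parameter built
 * above packs the clock index returned by smu_clk_get_index() into the upper
 * 16 bits and the frequency in MHz into the lower 16 bits, which is why the
 * frequency is masked with 0xffff.
 *
 *   clk_id = 2, max = 1000 (MHz)
 *   param  = (2 << 16) | (1000 & 0xffff) = 0x000203e8
 */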
95
96 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
97                            uint32_t *min, uint32_t *max)
98 {
99         int ret = 0, clk_id = 0;
100         uint32_t param = 0;
101
102         if (!min && !max)
103                 return -EINVAL;
104
105         clk_id = smu_clk_get_index(smu, clk_type);
106         if (clk_id < 0)
107                 return clk_id;
108
109         param = (clk_id & 0xffff) << 16;
110
111         if (max) {
112                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
113                 if (ret)
114                         return ret;
115                 ret = smu_read_smc_arg(smu, max);
116                 if (ret)
117                         return ret;
118         }
119
120         if (min) {
121                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
122                 if (ret)
123                         return ret;
124                 ret = smu_read_smc_arg(smu, min);
125                 if (ret)
126                         return ret;
127         }
128
129         return ret;
130 }
131
132 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
133                               uint16_t level, uint32_t *value)
134 {
135         int ret = 0, clk_id = 0;
136         uint32_t param;
137
138         if (!value)
139                 return -EINVAL;
140
141         clk_id = smu_clk_get_index(smu, clk_type);
142         if (clk_id < 0)
143                 return clk_id;
144
145         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
146
147         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
148                                           param);
149         if (ret)
150                 return ret;
151
152         ret = smu_read_smc_arg(smu, &param);
153         if (ret)
154                 return ret;
155
156         /* BIT31:  0 - Fine grained DPM, 1 - Discrete DPM.
157          * It is not supported for now, so mask it off. */
158         *value = param & 0x7fffffff;
159
160         return ret;
161 }
162
163 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
164                             uint32_t *value)
165 {
166         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
167 }
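
/*
 * Illustrative call sequence (a sketch, not part of the original source;
 * assumes SMU_SCLK is a valid smu_clk_type for the target ASIC, error
 * handling elided): read the number of SCLK DPM levels, then the frequency
 * of the highest level.
 *
 *   uint32_t count, freq;
 *   smu_get_dpm_level_count(smu, SMU_SCLK, &count);
 *   smu_get_dpm_freq_by_index(smu, SMU_SCLK, count - 1, &freq);
 *
 * The level index 0xff passed above is the value the firmware treats as a
 * request for the level count rather than a specific level's frequency.
 */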
168
169 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
170                            bool gate)
171 {
172         int ret = 0;
173
174         switch (block_type) {
175         case AMD_IP_BLOCK_TYPE_UVD:
176                 ret = smu_dpm_set_uvd_enable(smu, gate);
177                 break;
178         case AMD_IP_BLOCK_TYPE_VCE:
179                 ret = smu_dpm_set_vce_enable(smu, gate);
180                 break;
181         default:
182                 break;
183         }
184
185         return ret;
186 }
187
188 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
189 {
190         /* power states are not supported */
191         return POWER_STATE_TYPE_DEFAULT;
192 }
193
194 int smu_get_power_num_states(struct smu_context *smu,
195                              struct pp_states_info *state_info)
196 {
197         if (!state_info)
198                 return -EINVAL;
199
200         /* power states are not supported */
201         memset(state_info, 0, sizeof(struct pp_states_info));
202         state_info->nums = 0;
203
204         return 0;
205 }
206
207 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
208                            void *data, uint32_t *size)
209 {
210         int ret = 0;
211
212         switch (sensor) {
213         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
214                 *((uint32_t *)data) = smu->pstate_sclk;
215                 *size = 4;
216                 break;
217         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
218                 *((uint32_t *)data) = smu->pstate_mclk;
219                 *size = 4;
220                 break;
221         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
222                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
223                 *size = 8;
224                 break;
225         default:
226                 ret = -EINVAL;
227                 break;
228         }
229
230         if (ret)
231                 *size = 0;
232
233         return ret;
234 }
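
/*
 * Illustrative usage sketch (not part of the original source, error handling
 * elided): querying the stable-pstate sclk through the sensor interface
 * above.
 *
 *   uint32_t sclk, size = sizeof(sclk);
 *   smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
 *                          &sclk, &size);
 *
 * For AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK the caller must instead
 * pass a buffer of two uint32_t, since *size is set to 8 bytes.
 */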
235
236 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
237                      void *table_data, bool drv2smu)
238 {
239         struct smu_table_context *smu_table = &smu->smu_table;
240         struct smu_table *table = NULL;
241         int ret = 0;
242         int table_id = smu_table_get_index(smu, table_index);
243
244         if (!table_data || table_id >= smu_table->table_count)
245                 return -EINVAL;
246
247         table = &smu_table->tables[table_index];
248
249         if (drv2smu)
250                 memcpy(table->cpu_addr, table_data, table->size);
251
252         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
253                                           upper_32_bits(table->mc_address));
254         if (ret)
255                 return ret;
256         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
257                                           lower_32_bits(table->mc_address));
258         if (ret)
259                 return ret;
260         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
261                                           SMU_MSG_TransferTableDram2Smu :
262                                           SMU_MSG_TransferTableSmu2Dram,
263                                           table_id);
264         if (ret)
265                 return ret;
266
267         if (!drv2smu)
268                 memcpy(table_data, table->cpu_addr, table->size);
269
270         return ret;
271 }
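
/*
 * Illustrative caller sketch (an assumption, not from this file;
 * SMU_TABLE_SMU_METRICS and SmuMetrics_t come from the ASIC-specific
 * headers): pulling the metrics table back from the SMU into a driver
 * buffer.
 *
 *   SmuMetrics_t metrics;
 *   int ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, &metrics, false);
 *
 * With drv2smu == false the table is transferred SMU -> DRAM and then copied
 * from table->cpu_addr into the caller's buffer; with drv2smu == true the
 * direction is reversed.
 */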
272
273 bool is_support_sw_smu(struct amdgpu_device *adev)
274 {
275         if (adev->asic_type == CHIP_VEGA20)
276                 return amdgpu_dpm == 2;
277         else if (adev->asic_type >= CHIP_NAVI10)
278                 return true;
279         else
280                 return false;
281 }
282
283 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
284 {
285         struct smu_table_context *smu_table = &smu->smu_table;
286
287         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
288                 return -EINVAL;
289
290         if (smu_table->hardcode_pptable)
291                 *table = smu_table->hardcode_pptable;
292         else
293                 *table = smu_table->power_play_table;
294
295         return smu_table->power_play_table_size;
296 }
297
298 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
299 {
300         struct smu_table_context *smu_table = &smu->smu_table;
301         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
302         int ret = 0;
303
304         if (!smu->pm_enabled)
305                 return -EINVAL;
306         if (header->usStructureSize != size) {
307                 pr_err("pp table size not matched !\n");
308                 return -EIO;
309         }
310
311         mutex_lock(&smu->mutex);
312         if (!smu_table->hardcode_pptable)
313                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
314         if (!smu_table->hardcode_pptable) {
315                 ret = -ENOMEM;
316                 goto failed;
317         }
318
319         memcpy(smu_table->hardcode_pptable, buf, size);
320         smu_table->power_play_table = smu_table->hardcode_pptable;
321         smu_table->power_play_table_size = size;
322         mutex_unlock(&smu->mutex);
323
324         ret = smu_reset(smu);
325         if (ret)
326                 pr_info("smu reset failed, ret = %d\n", ret);
327
328         return ret;
329
330 failed:
331         mutex_unlock(&smu->mutex);
332         return ret;
333 }
334
335 int smu_feature_init_dpm(struct smu_context *smu)
336 {
337         struct smu_feature *feature = &smu->smu_feature;
338         int ret = 0;
339         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
340
341         if (!smu->pm_enabled)
342                 return ret;
343         mutex_lock(&feature->mutex);
344         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
345         mutex_unlock(&feature->mutex);
346
347         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
348                                              SMU_FEATURE_MAX/32);
349         if (ret)
350                 return ret;
351
352         mutex_lock(&feature->mutex);
353         bitmap_or(feature->allowed, feature->allowed,
354                       (unsigned long *)allowed_feature_mask,
355                       feature->feature_num);
356         mutex_unlock(&feature->mutex);
357
358         return ret;
359 }
360
361 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
362 {
363         struct smu_feature *feature = &smu->smu_feature;
364         uint32_t feature_id;
365         int ret = 0;
366
367         feature_id = smu_feature_get_index(smu, mask);
368
369         WARN_ON(feature_id > feature->feature_num);
370
371         mutex_lock(&feature->mutex);
372         ret = test_bit(feature_id, feature->enabled);
373         mutex_unlock(&feature->mutex);
374
375         return ret;
376 }
377
378 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
379                             bool enable)
380 {
381         struct smu_feature *feature = &smu->smu_feature;
382         uint32_t feature_id;
383         int ret = 0;
384
385         feature_id = smu_feature_get_index(smu, mask);
386
387         WARN_ON(feature_id > feature->feature_num);
388
389         mutex_lock(&feature->mutex);
390         ret = smu_feature_update_enable_state(smu, feature_id, enable);
391         if (ret)
392                 goto failed;
393
394         if (enable)
395                 test_and_set_bit(feature_id, feature->enabled);
396         else
397                 test_and_clear_bit(feature_id, feature->enabled);
398
399 failed:
400         mutex_unlock(&feature->mutex);
401
402         return ret;
403 }
404
405 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
406 {
407         struct smu_feature *feature = &smu->smu_feature;
408         uint32_t feature_id;
409         int ret = 0;
410
411         feature_id = smu_feature_get_index(smu, mask);
412
413         WARN_ON(feature_id > feature->feature_num);
414
415         mutex_lock(&feature->mutex);
416         ret = test_bit(feature_id, feature->supported);
417         mutex_unlock(&feature->mutex);
418
419         return ret;
420 }
421
422 int smu_feature_set_supported(struct smu_context *smu,
423                               enum smu_feature_mask mask,
424                               bool enable)
425 {
426         struct smu_feature *feature = &smu->smu_feature;
427         uint32_t feature_id;
428         int ret = 0;
429
430         feature_id = smu_feature_get_index(smu, mask);
431
432         WARN_ON(feature_id > feature->feature_num);
433
434         mutex_lock(&feature->mutex);
435         if (enable)
436                 test_and_set_bit(feature_id, feature->supported);
437         else
438                 test_and_clear_bit(feature_id, feature->supported);
439         mutex_unlock(&feature->mutex);
440
441         return ret;
442 }
443
444 static int smu_set_funcs(struct amdgpu_device *adev)
445 {
446         struct smu_context *smu = &adev->smu;
447
448         switch (adev->asic_type) {
449         case CHIP_VEGA20:
450         case CHIP_NAVI10:
451                 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
452                         smu->od_enabled = true;
453                 smu_v11_0_set_smu_funcs(smu);
454                 break;
455         default:
456                 return -EINVAL;
457         }
458
459         return 0;
460 }
461
462 static int smu_early_init(void *handle)
463 {
464         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
465         struct smu_context *smu = &adev->smu;
466
467         smu->adev = adev;
468         smu->pm_enabled = !!amdgpu_dpm;
469         mutex_init(&smu->mutex);
470
471         return smu_set_funcs(adev);
472 }
473
474 static int smu_late_init(void *handle)
475 {
476         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
477         struct smu_context *smu = &adev->smu;
478
479         if (!smu->pm_enabled)
480                 return 0;
481         mutex_lock(&smu->mutex);
482         smu_handle_task(&adev->smu,
483                         smu->smu_dpm.dpm_level,
484                         AMD_PP_TASK_COMPLETE_INIT);
485         mutex_unlock(&smu->mutex);
486
487         return 0;
488 }
489
490 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
491                             uint16_t *size, uint8_t *frev, uint8_t *crev,
492                             uint8_t **addr)
493 {
494         struct amdgpu_device *adev = smu->adev;
495         uint16_t data_start;
496
497         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
498                                            size, frev, crev, &data_start))
499                 return -EINVAL;
500
501         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
502
503         return 0;
504 }
505
506 static int smu_initialize_pptable(struct smu_context *smu)
507 {
508         /* TODO */
509         return 0;
510 }
511
512 static int smu_smc_table_sw_init(struct smu_context *smu)
513 {
514         int ret;
515
516         ret = smu_initialize_pptable(smu);
517         if (ret) {
518                 pr_err("Failed to init smu_initialize_pptable!\n");
519                 return ret;
520         }
521
522         /**
523          * Create smu_table structure, and init smc tables such as
524          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
525          */
526         ret = smu_init_smc_tables(smu);
527         if (ret) {
528                 pr_err("Failed to init smc tables!\n");
529                 return ret;
530         }
531
532         /**
533          * Create the smu_power_context structure, allocate the smu_dpm_context
534          * and set the context size used to fill the smu_power_context data.
535          */
536         ret = smu_init_power(smu);
537         if (ret) {
538                 pr_err("Failed to init smu_init_power!\n");
539                 return ret;
540         }
541
542         return 0;
543 }
544
545 static int smu_smc_table_sw_fini(struct smu_context *smu)
546 {
547         int ret;
548
549         ret = smu_fini_smc_tables(smu);
550         if (ret) {
551                 pr_err("Failed to smu_fini_smc_tables!\n");
552                 return ret;
553         }
554
555         return 0;
556 }
557
558 static int smu_sw_init(void *handle)
559 {
560         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
561         struct smu_context *smu = &adev->smu;
562         int ret;
563
564         smu->pool_size = adev->pm.smu_prv_buffer_size;
565         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
566         mutex_init(&smu->smu_feature.mutex);
567         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
568         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
569         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
570         smu->watermarks_bitmap = 0;
571         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
572         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
573
574         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
575         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
576         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
577         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
578         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
579         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
580         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
581         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
582
583         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
584         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
585         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
586         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
587         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
588         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
589         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
590         smu->display_config = &adev->pm.pm_display_cfg;
591
592         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
593         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
594         ret = smu_init_microcode(smu);
595         if (ret) {
596                 pr_err("Failed to load smu firmware!\n");
597                 return ret;
598         }
599
600         ret = smu_smc_table_sw_init(smu);
601         if (ret) {
602                 pr_err("Failed to sw init smc table!\n");
603                 return ret;
604         }
605
606         return 0;
607 }
608
609 static int smu_sw_fini(void *handle)
610 {
611         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
612         struct smu_context *smu = &adev->smu;
613         int ret;
614
615         ret = smu_smc_table_sw_fini(smu);
616         if (ret) {
617                 pr_err("Failed to sw fini smc table!\n");
618                 return ret;
619         }
620
621         ret = smu_fini_power(smu);
622         if (ret) {
623                 pr_err("Failed to fini smu power!\n");
624                 return ret;
625         }
626
627         return 0;
628 }
629
630 static int smu_init_fb_allocations(struct smu_context *smu)
631 {
632         struct amdgpu_device *adev = smu->adev;
633         struct smu_table_context *smu_table = &smu->smu_table;
634         struct smu_table *tables = smu_table->tables;
635         uint32_t table_count = smu_table->table_count;
636         uint32_t i = 0;
637         int32_t ret = 0;
638
639         if (table_count <= 0)
640                 return -EINVAL;
641
642         for (i = 0 ; i < table_count; i++) {
643                 if (tables[i].size == 0)
644                         continue;
645                 ret = amdgpu_bo_create_kernel(adev,
646                                               tables[i].size,
647                                               tables[i].align,
648                                               tables[i].domain,
649                                               &tables[i].bo,
650                                               &tables[i].mc_address,
651                                               &tables[i].cpu_addr);
652                 if (ret)
653                         goto failed;
654         }
655
656         return 0;
657 failed:
658         for (; i > 0; i--) {
659                 if (tables[i].size == 0)
660                         continue;
661                 amdgpu_bo_free_kernel(&tables[i].bo,
662                                       &tables[i].mc_address,
663                                       &tables[i].cpu_addr);
664
665         }
666         return ret;
667 }
668
669 static int smu_fini_fb_allocations(struct smu_context *smu)
670 {
671         struct smu_table_context *smu_table = &smu->smu_table;
672         struct smu_table *tables = smu_table->tables;
673         uint32_t table_count = smu_table->table_count;
674         uint32_t i = 0;
675
676         if (table_count == 0 || tables == NULL)
677                 return 0;
678
679         for (i = 0 ; i < table_count; i++) {
680                 if (tables[i].size == 0)
681                         continue;
682                 amdgpu_bo_free_kernel(&tables[i].bo,
683                                       &tables[i].mc_address,
684                                       &tables[i].cpu_addr);
685         }
686
687         return 0;
688 }
689
690 static int smu_override_pcie_parameters(struct smu_context *smu)
691 {
692         struct amdgpu_device *adev = smu->adev;
693         uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
694         int ret;
695
696         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
697                 pcie_gen = 3;
698         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
699                 pcie_gen = 2;
700         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
701                 pcie_gen = 1;
702         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
703                 pcie_gen = 0;
704
705         /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
706          * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
707          * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
708          */
709         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
710                 pcie_width = 6;
711         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
712                 pcie_width = 5;
713         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
714                 pcie_width = 4;
715         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
716                 pcie_width = 3;
717         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
718                 pcie_width = 2;
719         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
720                 pcie_width = 1;
721
722         smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
723         ret = smu_send_smc_msg_with_param(smu,
724                                           SMU_MSG_OverridePcieParameters,
725                                           smu_pcie_arg);
726         if (ret)
727                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
728         return ret;
729 }
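
/*
 * Worked example of the encoding above (illustrative only): on a Gen4-capable
 * platform with a x16 link, pcie_gen = 3 and pcie_width = 6, so
 *
 *   smu_pcie_arg = (1 << 16) | (3 << 8) | 6 = 0x00010306
 *
 * i.e. LCLK DPM level 1, PCIE GEN4, lane width code 6 (x16).
 */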
730
731 static int smu_smc_table_hw_init(struct smu_context *smu,
732                                  bool initialize)
733 {
734         struct amdgpu_device *adev = smu->adev;
735         int ret;
736
737         if (smu_is_dpm_running(smu) && adev->in_suspend) {
738                 pr_info("dpm has been enabled\n");
739                 return 0;
740         }
741
742         ret = smu_init_display(smu);
743         if (ret)
744                 return ret;
745
746         if (initialize) {
747                 /* get boot_values from vbios to set revision, gfxclk, etc. */
748                 ret = smu_get_vbios_bootup_values(smu);
749                 if (ret)
750                         return ret;
751
752                 ret = smu_setup_pptable(smu);
753                 if (ret)
754                         return ret;
755
756                 /*
757                  * Check that the format_revision in vbios matches the pptable header
758                  * version and that the structure size is not 0.
759                  */
760                 ret = smu_check_pptable(smu);
761                 if (ret)
762                         return ret;
763
764                 /*
765                  * allocate vram bos to store smc table contents.
766                  */
767                 ret = smu_init_fb_allocations(smu);
768                 if (ret)
769                         return ret;
770
771                 /*
772                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
773                  * smu_table_context structure. Then read the smc_dpm_table from vbios
774                  * and fill it into smc_pptable.
775                  */
776                 ret = smu_parse_pptable(smu);
777                 if (ret)
778                         return ret;
779
780                 /*
781                  * Send the GetDriverIfVersion message to check that the returned value
782                  * matches the DRIVER_IF_VERSION in the smc header.
783                  */
784                 ret = smu_check_fw_version(smu);
785                 if (ret)
786                         return ret;
787         }
788
789         /*
790          * Copy the pptable bo in vram to the smc with SMU messages such as
791          * SetDriverDramAddr and TransferTableDram2Smu.
792          */
793         ret = smu_write_pptable(smu);
794         if (ret)
795                 return ret;
796
797         /* issue RunAfllBtc msg */
798         ret = smu_run_afll_btc(smu);
799         if (ret)
800                 return ret;
801
802         ret = smu_feature_set_allowed_mask(smu);
803         if (ret)
804                 return ret;
805
806         ret = smu_system_features_control(smu, true);
807         if (ret)
808                 return ret;
809
810         ret = smu_override_pcie_parameters(smu);
811         if (ret)
812                 return ret;
813
814         ret = smu_notify_display_change(smu);
815         if (ret)
816                 return ret;
817
818         /*
819          * Set the minimum deep sleep dcefclk to the bootup value from vbios via
820          * the SetMinDeepSleepDcefclk message.
821          */
822         ret = smu_set_min_dcef_deep_sleep(smu);
823         if (ret)
824                 return ret;
825
826         /*
827          * Set initial values (read from vbios) in the dpm tables context, such as
828          * gfxclk, memclk and dcefclk, and enable the DPM feature for each type
829          * of clock.
830          */
831         if (initialize) {
832                 ret = smu_populate_smc_pptable(smu);
833                 if (ret)
834                         return ret;
835
836                 ret = smu_init_max_sustainable_clocks(smu);
837                 if (ret)
838                         return ret;
839         }
840
841         ret = smu_set_od8_default_settings(smu, initialize);
842         if (ret)
843                 return ret;
844
845         if (initialize) {
846                 ret = smu_populate_umd_state_clk(smu);
847                 if (ret)
848                         return ret;
849
850                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
851                 if (ret)
852                         return ret;
853         }
854
855         /*
856          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
857          */
858         ret = smu_set_tool_table_location(smu);
859
860         if (!smu_is_dpm_running(smu))
861                 pr_info("dpm has been disabled\n");
862
863         return ret;
864 }
865
866 /**
867  * smu_alloc_memory_pool - allocate memory pool in the system memory
868  *
869  * @smu: smu_context pointer
870  *
871  * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
872  * DramLogSetDramAddr messages notify the SMC of its location.
873  *
874  * Returns 0 on success, error on failure.
875  */
876 static int smu_alloc_memory_pool(struct smu_context *smu)
877 {
878         struct amdgpu_device *adev = smu->adev;
879         struct smu_table_context *smu_table = &smu->smu_table;
880         struct smu_table *memory_pool = &smu_table->memory_pool;
881         uint64_t pool_size = smu->pool_size;
882         int ret = 0;
883
884         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
885                 return ret;
886
887         memory_pool->size = pool_size;
888         memory_pool->align = PAGE_SIZE;
889         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
890
891         switch (pool_size) {
892         case SMU_MEMORY_POOL_SIZE_256_MB:
893         case SMU_MEMORY_POOL_SIZE_512_MB:
894         case SMU_MEMORY_POOL_SIZE_1_GB:
895         case SMU_MEMORY_POOL_SIZE_2_GB:
896                 ret = amdgpu_bo_create_kernel(adev,
897                                               memory_pool->size,
898                                               memory_pool->align,
899                                               memory_pool->domain,
900                                               &memory_pool->bo,
901                                               &memory_pool->mc_address,
902                                               &memory_pool->cpu_addr);
903                 break;
904         default:
905                 break;
906         }
907
908         return ret;
909 }
910
911 static int smu_free_memory_pool(struct smu_context *smu)
912 {
913         struct smu_table_context *smu_table = &smu->smu_table;
914         struct smu_table *memory_pool = &smu_table->memory_pool;
915         int ret = 0;
916
917         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
918                 return ret;
919
920         amdgpu_bo_free_kernel(&memory_pool->bo,
921                               &memory_pool->mc_address,
922                               &memory_pool->cpu_addr);
923
924         memset(memory_pool, 0, sizeof(struct smu_table));
925
926         return ret;
927 }
928
929 static int smu_hw_init(void *handle)
930 {
931         int ret;
932         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
933         struct smu_context *smu = &adev->smu;
934
935         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
936                 ret = smu_check_fw_status(smu);
937                 if (ret) {
938                         pr_err("SMC firmware status is not correct\n");
939                         return ret;
940                 }
941         }
942
943         mutex_lock(&smu->mutex);
944
945         ret = smu_feature_init_dpm(smu);
946         if (ret)
947                 goto failed;
948
949         ret = smu_smc_table_hw_init(smu, true);
950         if (ret)
951                 goto failed;
952
953         ret = smu_alloc_memory_pool(smu);
954         if (ret)
955                 goto failed;
956
957         /*
958          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
959          * notify the SMC of the pool location.
960          */
961         ret = smu_notify_memory_pool_location(smu);
962         if (ret)
963                 goto failed;
964
965         ret = smu_start_thermal_control(smu);
966         if (ret)
967                 goto failed;
968
969         mutex_unlock(&smu->mutex);
970
971         if (!smu->pm_enabled)
972                 adev->pm.dpm_enabled = false;
973         else
974                 adev->pm.dpm_enabled = true;    /* TODO: only set the dpm_enabled flag once VCN and DAL DPM are workable */
975
976         pr_info("SMU is initialized successfully!\n");
977
978         return 0;
979
980 failed:
981         mutex_unlock(&smu->mutex);
982         return ret;
983 }
984
985 static int smu_hw_fini(void *handle)
986 {
987         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
988         struct smu_context *smu = &adev->smu;
989         struct smu_table_context *table_context = &smu->smu_table;
990         int ret = 0;
991
992         kfree(table_context->driver_pptable);
993         table_context->driver_pptable = NULL;
994
995         kfree(table_context->max_sustainable_clocks);
996         table_context->max_sustainable_clocks = NULL;
997
998         kfree(table_context->od_feature_capabilities);
999         table_context->od_feature_capabilities = NULL;
1000
1001         kfree(table_context->od_settings_max);
1002         table_context->od_settings_max = NULL;
1003
1004         kfree(table_context->od_settings_min);
1005         table_context->od_settings_min = NULL;
1006
1007         kfree(table_context->overdrive_table);
1008         table_context->overdrive_table = NULL;
1009
1010         kfree(table_context->od8_settings);
1011         table_context->od8_settings = NULL;
1012
1013         ret = smu_fini_fb_allocations(smu);
1014         if (ret)
1015                 return ret;
1016
1017         ret = smu_free_memory_pool(smu);
1018         if (ret)
1019                 return ret;
1020
1021         return 0;
1022 }
1023
1024 int smu_reset(struct smu_context *smu)
1025 {
1026         struct amdgpu_device *adev = smu->adev;
1027         int ret = 0;
1028
1029         ret = smu_hw_fini(adev);
1030         if (ret)
1031                 return ret;
1032
1033         ret = smu_hw_init(adev);
1034         if (ret)
1035                 return ret;
1036
1037         return ret;
1038 }
1039
1040 static int smu_suspend(void *handle)
1041 {
1042         int ret;
1043         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1044         struct smu_context *smu = &adev->smu;
1045
1046         ret = smu_system_features_control(smu, false);
1047         if (ret)
1048                 return ret;
1049
1050         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1051
1052         if (adev->asic_type >= CHIP_NAVI10 &&
1053             adev->gfx.rlc.funcs->stop)
1054                 adev->gfx.rlc.funcs->stop(adev);
1055
1056         return 0;
1057 }
1058
1059 static int smu_resume(void *handle)
1060 {
1061         int ret;
1062         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1063         struct smu_context *smu = &adev->smu;
1064
1065         pr_info("SMU is resuming...\n");
1066
1067         mutex_lock(&smu->mutex);
1068
1069         ret = smu_smc_table_hw_init(smu, false);
1070         if (ret)
1071                 goto failed;
1072
1073         ret = smu_start_thermal_control(smu);
1074         if (ret)
1075                 goto failed;
1076
1077         mutex_unlock(&smu->mutex);
1078
1079         pr_info("SMU is resumed successfully!\n");
1080
1081         return 0;
1082 failed:
1083         mutex_unlock(&smu->mutex);
1084         return ret;
1085 }
1086
1087 int smu_display_configuration_change(struct smu_context *smu,
1088                                      const struct amd_pp_display_configuration *display_config)
1089 {
1090         int index = 0;
1091         int num_of_active_display = 0;
1092
1093         if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1094                 return -EINVAL;
1095
1096         if (!display_config)
1097                 return -EINVAL;
1098
1099         mutex_lock(&smu->mutex);
1100
1101         smu_set_deep_sleep_dcefclk(smu,
1102                                    display_config->min_dcef_deep_sleep_set_clk / 100);
1103
1104         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1105                 if (display_config->displays[index].controller_id != 0)
1106                         num_of_active_display++;
1107         }
1108
1109         smu_set_active_display_count(smu, num_of_active_display);
1110
1111         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1112                            display_config->cpu_cc6_disable,
1113                            display_config->cpu_pstate_disable,
1114                            display_config->nb_pstate_switch_disable);
1115
1116         mutex_unlock(&smu->mutex);
1117
1118         return 0;
1119 }
1120
1121 static int smu_get_clock_info(struct smu_context *smu,
1122                               struct smu_clock_info *clk_info,
1123                               enum smu_perf_level_designation designation)
1124 {
1125         int ret;
1126         struct smu_performance_level level = {0};
1127
1128         if (!clk_info)
1129                 return -EINVAL;
1130
1131         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1132         if (ret)
1133                 return -EINVAL;
1134
1135         clk_info->min_mem_clk = level.memory_clock;
1136         clk_info->min_eng_clk = level.core_clock;
1137         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1138
1139         ret = smu_get_perf_level(smu, designation, &level);
1140         if (ret)
1141                 return -EINVAL;
1142
1143         clk_info->min_mem_clk = level.memory_clock;
1144         clk_info->min_eng_clk = level.core_clock;
1145         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1146
1147         return 0;
1148 }
1149
1150 int smu_get_current_clocks(struct smu_context *smu,
1151                            struct amd_pp_clock_info *clocks)
1152 {
1153         struct amd_pp_simple_clock_info simple_clocks = {0};
1154         struct smu_clock_info hw_clocks;
1155         int ret = 0;
1156
1157         if (!is_support_sw_smu(smu->adev))
1158                 return -EINVAL;
1159
1160         mutex_lock(&smu->mutex);
1161
1162         smu_get_dal_power_level(smu, &simple_clocks);
1163
1164         if (smu->support_power_containment)
1165                 ret = smu_get_clock_info(smu, &hw_clocks,
1166                                          PERF_LEVEL_POWER_CONTAINMENT);
1167         else
1168                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1169
1170         if (ret) {
1171                 pr_err("Error in smu_get_clock_info\n");
1172                 goto failed;
1173         }
1174
1175         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1176         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1177         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1178         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1179         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1180         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1181         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1182         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1183
1184         if (simple_clocks.level == 0)
1185                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1186         else
1187                 clocks->max_clocks_state = simple_clocks.level;
1188
1189         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1190                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1191                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1192         }
1193
1194 failed:
1195         mutex_unlock(&smu->mutex);
1196         return ret;
1197 }
1198
1199 static int smu_set_clockgating_state(void *handle,
1200                                      enum amd_clockgating_state state)
1201 {
1202         return 0;
1203 }
1204
1205 static int smu_set_powergating_state(void *handle,
1206                                      enum amd_powergating_state state)
1207 {
1208         return 0;
1209 }
1210
1211 static int smu_enable_umd_pstate(void *handle,
1212                       enum amd_dpm_forced_level *level)
1213 {
1214         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1215                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1216                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1217                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1218
1219         struct smu_context *smu = (struct smu_context*)(handle);
1220         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1221         if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
1222                 return -EINVAL;
1223
1224         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1225                 /* enter umd pstate, save current level, disable gfx cg */
1226                 if (*level & profile_mode_mask) {
1227                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1228                         smu_dpm_ctx->enable_umd_pstate = true;
1229                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1230                                                                AMD_IP_BLOCK_TYPE_GFX,
1231                                                                AMD_CG_STATE_UNGATE);
1232                         amdgpu_device_ip_set_powergating_state(smu->adev,
1233                                                                AMD_IP_BLOCK_TYPE_GFX,
1234                                                                AMD_PG_STATE_UNGATE);
1235                 }
1236         } else {
1237                 /* exit umd pstate, restore level, enable gfx cg */
1238                 if (!(*level & profile_mode_mask)) {
1239                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1240                                 *level = smu_dpm_ctx->saved_dpm_level;
1241                         smu_dpm_ctx->enable_umd_pstate = false;
1242                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1243                                                                AMD_IP_BLOCK_TYPE_GFX,
1244                                                                AMD_CG_STATE_GATE);
1245                         amdgpu_device_ip_set_powergating_state(smu->adev,
1246                                                                AMD_IP_BLOCK_TYPE_GFX,
1247                                                                AMD_PG_STATE_GATE);
1248                 }
1249         }
1250
1251         return 0;
1252 }
1253
1254 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1255                                    enum amd_dpm_forced_level level,
1256                                    bool skip_display_settings)
1257 {
1258         int ret = 0;
1259         int index = 0;
1260         uint32_t sclk_mask, mclk_mask, soc_mask;
1261         long workload;
1262         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1263
1264         if (!smu->pm_enabled)
1265                 return -EINVAL;
1266         if (!skip_display_settings) {
1267                 ret = smu_display_config_changed(smu);
1268                 if (ret) {
1269                         pr_err("Failed to change display config!\n");
1270                         return ret;
1271                 }
1272         }
1273
1274         if (!smu->pm_enabled)
1275                 return -EINVAL;
1276         ret = smu_apply_clocks_adjust_rules(smu);
1277         if (ret) {
1278                 pr_err("Failed to apply clocks adjust rules!\n");
1279                 return ret;
1280         }
1281
1282         if (!skip_display_settings) {
1283                 ret = smu_notify_smc_dispaly_config(smu);
1284                 if (ret) {
1285                         pr_err("Failed to notify smc display config!\n");
1286                         return ret;
1287                 }
1288         }
1289
1290         if (smu_dpm_ctx->dpm_level != level) {
1291                 switch (level) {
1292                 case AMD_DPM_FORCED_LEVEL_HIGH:
1293                         ret = smu_force_dpm_limit_value(smu, true);
1294                         break;
1295                 case AMD_DPM_FORCED_LEVEL_LOW:
1296                         ret = smu_force_dpm_limit_value(smu, false);
1297                         break;
1298
1299                 case AMD_DPM_FORCED_LEVEL_AUTO:
1300                         ret = smu_unforce_dpm_levels(smu);
1301                         break;
1302
1303                 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1304                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1305                 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1306                 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1307                         ret = smu_get_profiling_clk_mask(smu, level,
1308                                                          &sclk_mask,
1309                                                          &mclk_mask,
1310                                                          &soc_mask);
1311                         if (ret)
1312                                 return ret;
1313                         smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
1314                         smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
1315                         break;
1316
1317                 case AMD_DPM_FORCED_LEVEL_MANUAL:
1318                 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1319                 default:
1320                         break;
1321                 }
1322
1323                 if (!ret)
1324                         smu_dpm_ctx->dpm_level = level;
1325         }
1326
1327         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1328                 index = fls(smu->workload_mask);
1329                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1330                 workload = smu->workload_setting[index];
1331
1332                 if (smu->power_profile_mode != workload)
1333                         smu_set_power_profile_mode(smu, &workload, 0);
1334         }
1335
1336         return ret;
1337 }
1338
1339 int smu_handle_task(struct smu_context *smu,
1340                     enum amd_dpm_forced_level level,
1341                     enum amd_pp_task task_id)
1342 {
1343         int ret = 0;
1344
1345         switch (task_id) {
1346         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1347                 ret = smu_pre_display_config_changed(smu);
1348                 if (ret)
1349                         return ret;
1350                 ret = smu_set_cpu_power_state(smu);
1351                 if (ret)
1352                         return ret;
1353                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1354                 break;
1355         case AMD_PP_TASK_COMPLETE_INIT:
1356         case AMD_PP_TASK_READJUST_POWER_STATE:
1357                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1358                 break;
1359         default:
1360                 break;
1361         }
1362
1363         return ret;
1364 }
1365
1366 const struct amd_ip_funcs smu_ip_funcs = {
1367         .name = "smu",
1368         .early_init = smu_early_init,
1369         .late_init = smu_late_init,
1370         .sw_init = smu_sw_init,
1371         .sw_fini = smu_sw_fini,
1372         .hw_init = smu_hw_init,
1373         .hw_fini = smu_hw_fini,
1374         .suspend = smu_suspend,
1375         .resume = smu_resume,
1376         .is_idle = NULL,
1377         .check_soft_reset = NULL,
1378         .wait_for_idle = NULL,
1379         .soft_reset = NULL,
1380         .set_clockgating_state = smu_set_clockgating_state,
1381         .set_powergating_state = smu_set_powergating_state,
1382         .enable_umd_pstate = smu_enable_umd_pstate,
1383 };
1384
1385 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1386 {
1387         .type = AMD_IP_BLOCK_TYPE_SMC,
1388         .major = 11,
1389         .minor = 0,
1390         .rev = 0,
1391         .funcs = &smu_ip_funcs,
1392 };