/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported; always report the default */
	return POWER_STATE_TYPE_DEFAULT;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
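/*
 * Caller-side sketch (illustrative, not part of this file): reading the
 * stable-pstate sclk through the common sensor interface above. The local
 * variable names here are assumptions for the example.
 *
 *	uint32_t sclk, size = sizeof(sclk);
 *	int err = smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
 *					 &sclk, &size);
 */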
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
			      void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	uint32_t table_index;

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	/* the extended argument rides in the upper 16 bits of the message parameter */
	table_index = (exarg << 16) | table_id;

	table = &smu_table->tables[table_id];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_index);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}
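/*
 * Usage sketch (illustrative): pulling the metrics table out of the SMU
 * would look roughly like the following, where TABLE_SMU_METRICS and
 * SmuMetrics_t are assumed to come from the SMU11 driver-interface headers:
 *
 *	SmuMetrics_t metrics;
 *	int err = smu_update_table_with_arg(smu, TABLE_SMU_METRICS, 0,
 *					    &metrics, false);
 */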
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_supported(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_supported(struct smu_context *smu, int feature_id,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
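/*
 * Typical caller pattern (illustrative): enable a feature only when the
 * firmware reports it as supported. FEATURE_DPM_UVD_BIT is assumed here for
 * the example; the real bit names live in the SMU11 headers.
 *
 *	if (smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
 *		smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, true);
 */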
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize the pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context within it to hold the dpm-related state.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	/* fill the priority table before deriving the default workload mask */
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i;
	int ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	while (i--) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1.
	 * Bit 15:8: PCIe gen, 0 to 3 corresponds to GEN1 to GEN4.
	 * Bit 7:0: PCIe lane width, 1 to 7 corresponds to x1 to x32.
	 */
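	/*
	 * Worked example of the encoding above: a GEN4 (pcie_gen = 3) x16
	 * (pcie_width = 6) link forced to LCLK DPM1 encodes as
	 * (1 << 16) | (3 << 8) | 6 == 0x10306.
	 */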
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	ret = smu_read_pptable_from_vbios(smu);
	if (ret)
		return ret;

	/* get boot values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret)
		return ret;

	ret = smu_get_clk_info_from_vbios(smu);
	if (ret)
		return ret;

	/*
	 * Check that the format_revision in vbios is up to the pptable header
	 * version, and that the structure size is not 0.
	 */
	ret = smu_check_pptable(smu);
	if (ret)
		return ret;

	/* allocate vram bos to store smc table contents */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	/*
	 * Parse the pptable format and fill the PPTable_t smc_pptable in the
	 * smu_table_context structure; then read the smc_dpm_table from vbios
	 * and merge it into smc_pptable.
	 */
	ret = smu_parse_pptable(smu);
	if (ret)
		return ret;

	/*
	 * Send the GetDriverIfVersion message and check that the reply
	 * matches the DRIVER_IF_VERSION in the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue the RunAfllBtc message */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dcefclk to the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) into the dpm tables context,
	 * such as gfxclk, memclk and dcefclk, and enable the DPM feature for
	 * each dpm table.
	 */
	ret = smu_populate_smc_pptable(smu);
	if (ret)
		return ret;

	ret = smu_init_max_sustainable_clocks(smu);
	if (ret)
		return ret;

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	ret = smu_populate_umd_state_clk(smu);
	if (ret)
		return ret;

	ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
	if (ret)
		return ret;

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message, for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	return ret;
}
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: pointer to the SMU context
 *
 * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the SMU of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
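/*
 * Note on sizing (informational): smu->pool_size ultimately comes from the
 * amdgpu.smu_memory_pool_size module parameter (0, 256 MB, 512 MB, 1 GB or
 * 2 GB); any other value falls through the switch above and leaves the pool
 * unallocated.
 */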
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMU of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	adev->pm.dpm_enabled = true;

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}
static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
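/*
 * Illustrative path (an assumption based on the wider amdgpu sysfs
 * interface, not code in this file): writing "profile_peak" to the
 * power_dpm_force_performance_level sysfs file requests
 * AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, which takes the "enter umd pstate"
 * branch above and ungates GFX clock- and power-gating.
 */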
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!\n");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!\n");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		switch (level) {
		case AMD_DPM_FORCED_LEVEL_HIGH:
			ret = smu_force_dpm_limit_value(smu, true);
			break;
		case AMD_DPM_FORCED_LEVEL_LOW:
			ret = smu_force_dpm_limit_value(smu, false);
			break;
		case AMD_DPM_FORCED_LEVEL_AUTO:
			ret = smu_unforce_dpm_levels(smu);
			break;
		case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
			ret = smu_get_profiling_clk_mask(smu, level,
							 &sclk_mask,
							 &mclk_mask,
							 &soc_mask);
			if (ret)
				return ret;
			smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
			smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
			break;
		case AMD_DPM_FORCED_LEVEL_MANUAL:
		case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		default:
			break;
		}

		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}
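/*
 * Worked example (illustrative): with only the default profile active,
 * workload_mask == 0x1, so fls() returns 1 and index 0 selects
 * PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT from workload_setting[].
 */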
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};