drm/amd/powerplay: refine code to support no-dpm case

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index ec3dbbeb8f76ca79797e9865916b73c3902c5a2f..9320cf3ef036997a463e3f045dbb164653e0f74e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -519,26 +519,19 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
-       struct smu_table *table = NULL;
-       int ret = 0;
+       struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_table_get_index(smu, table_index);
+       uint32_t table_size;
+       int ret = 0;
 
        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;
 
-       table = &smu_table->tables[table_index];
+       table_size = smu_table->tables[table_index].size;
 
        if (drv2smu)
-               memcpy(table->cpu_addr, table_data, table->size);
+               memcpy(table->cpu_addr, table_data, table_size);
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
-                                         upper_32_bits(table->mc_address));
-       if (ret)
-               return ret;
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
-                                         lower_32_bits(table->mc_address));
-       if (ret)
-               return ret;
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
@@ -550,7 +543,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
        adev->nbio.funcs->hdp_flush(adev, NULL);
 
        if (!drv2smu)
-               memcpy(table_data, table->cpu_addr, table->size);
+               memcpy(table_data, table->cpu_addr, table_size);
 
        return ret;
 }
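
For readers tracing the new flow, the sketch below is a small user-space model of the staging pattern smu_update_table() follows after this change: every transfer passes through one shared buffer, the copy direction is selected by drv2smu, and the length comes from the per-table metadata rather than the shared buffer itself. All toy_* names, the fixed buffer sizes, and the plain memcpy standing in for the SMU messages are invented for illustration; none of this is part of the patch.

/*
 * Toy model of the shared staging buffer (not kernel code).
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct toy_table { uint32_t size; };            /* per-table metadata */

static uint8_t staging[256];                    /* stands in for driver_table->cpu_addr */
static uint8_t smu_side[256];                   /* stands in for the SMU's own copy */

static void toy_update_table(const struct toy_table *t, void *data, int drv2smu)
{
	if (drv2smu) {
		memcpy(staging, data, t->size);          /* driver -> staging buffer */
		memcpy(smu_side, staging, t->size);      /* "TransferTableDram2Smu" */
	} else {
		memcpy(staging, smu_side, t->size);      /* "TransferTableSmu2Dram" */
		memcpy(data, staging, t->size);          /* staging buffer -> driver */
	}
}

int main(void)
{
	struct toy_table wm = { .size = 8 };
	uint8_t in[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t out[8] = { 0 };

	toy_update_table(&wm, in, 1);   /* write: driver data reaches the "SMU" */
	toy_update_table(&wm, out, 0);  /* read it back through the same buffer */
	printf("%d\n", out[3]);         /* prints 4 */
	return 0;
}
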
@@ -976,32 +969,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
+       struct smu_table *driver_table = &(smu_table->driver_table);
+       uint32_t max_table_size = 0;
        int ret, i;
 
-       for (i = 0; i < SMU_TABLE_COUNT; i++) {
-               if (tables[i].size == 0)
-                       continue;
+       /* VRAM allocation for tool table */
+       if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
-                                             tables[i].size,
-                                             tables[i].align,
-                                             tables[i].domain,
-                                             &tables[i].bo,
-                                             &tables[i].mc_address,
-                                             &tables[i].cpu_addr);
-               if (ret)
-                       goto failed;
+                                             tables[SMU_TABLE_PMSTATUSLOG].size,
+                                             tables[SMU_TABLE_PMSTATUSLOG].align,
+                                             tables[SMU_TABLE_PMSTATUSLOG].domain,
+                                             &tables[SMU_TABLE_PMSTATUSLOG].bo,
+                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+               if (ret) {
+                       pr_err("VRAM allocation for tool table failed!\n");
+                       return ret;
+               }
        }
 
-       return 0;
-failed:
-       while (--i >= 0) {
+       /* VRAM allocation for driver table */
+       for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;
-               amdgpu_bo_free_kernel(&tables[i].bo,
-                                     &tables[i].mc_address,
-                                     &tables[i].cpu_addr);
 
+               if (i == SMU_TABLE_PMSTATUSLOG)
+                       continue;
+
+               if (max_table_size < tables[i].size)
+                       max_table_size = tables[i].size;
        }
+
+       driver_table->size = max_table_size;
+       driver_table->align = PAGE_SIZE;
+       driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+       ret = amdgpu_bo_create_kernel(adev,
+                                     driver_table->size,
+                                     driver_table->align,
+                                     driver_table->domain,
+                                     &driver_table->bo,
+                                     &driver_table->mc_address,
+                                     &driver_table->cpu_addr);
+       if (ret) {
+               pr_err("VRAM allocation for driver table failed!\n");
+               if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+                       amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+                                             &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+                                             &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+       }
+
        return ret;
 }
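
As a quick illustration of the sizing rule in smu_init_fb_allocations() above, the snippet below computes the single allocation size the same way: the shared driver table only has to hold the largest individual table, since transfers are staged through it one at a time, while the tool table keeps its own buffer. It is plain user-space C with made-up table sizes and TOY_* names; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define TOY_TABLE_COUNT 5
#define TOY_TABLE_TOOL  3   /* stands in for SMU_TABLE_PMSTATUSLOG; allocated separately */

int main(void)
{
	uint32_t sizes[TOY_TABLE_COUNT] = { 4096, 1024, 16384, 65536, 2048 };  /* example sizes */
	uint32_t max_table_size = 0;
	int i;

	for (i = 0; i < TOY_TABLE_COUNT; i++) {
		if (sizes[i] == 0)
			continue;
		if (i == TOY_TABLE_TOOL)
			continue;
		if (max_table_size < sizes[i])
			max_table_size = sizes[i];
	}

	/* one buffer of this size replaces one buffer per table */
	printf("shared driver table size: %u\n", max_table_size);  /* prints 16384 */
	return 0;
}
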
 
@@ -1009,18 +1026,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
-       uint32_t i = 0;
+       struct smu_table *driver_table = &(smu_table->driver_table);
 
        if (!tables)
                return 0;
 
-       for (i = 0; i < SMU_TABLE_COUNT; i++) {
-               if (tables[i].size == 0)
-                       continue;
-               amdgpu_bo_free_kernel(&tables[i].bo,
-                                     &tables[i].mc_address,
-                                     &tables[i].cpu_addr);
-       }
+       if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+               amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+                                     &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+                                     &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+       amdgpu_bo_free_kernel(&driver_table->bo,
+                             &driver_table->mc_address,
+                             &driver_table->cpu_addr);
 
        return 0;
 }
@@ -1091,6 +1109,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 
        /* smu_dump_pptable(smu); */
        if (!amdgpu_sriov_vf(adev)) {
+               ret = smu_set_driver_table_location(smu);
+               if (ret)
+                       return ret;
+
                /*
                 * Copy pptable bo in the vram to smc with SMU MSGs such as
                 * SetDriverDramAddr and TransferTableDram2Smu.
@@ -1341,6 +1363,9 @@ static int smu_hw_fini(void *handle)
                smu_powergate_jpeg(&adev->smu, true);
        }
 
+       if (!smu->pm_enabled)
+               return 0;
+
        if (!amdgpu_sriov_vf(adev)){
                ret = smu_stop_thermal_control(smu);
                if (ret) {
@@ -1413,6 +1438,9 @@ static int smu_suspend(void *handle)
        struct smu_context *smu = &adev->smu;
        bool baco_feature_is_enabled = false;
 
+       if (!smu->pm_enabled)
+               return 0;
+
        if(!smu->is_apu)
                baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
 
@@ -1445,6 +1473,12 @@ static int smu_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 
+       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+               return 0;
+
+       if (!smu->pm_enabled)
+               return 0;
+
        pr_info("SMU is resuming...\n");
 
        ret = smu_start_smc_engine(smu);
@@ -1923,32 +1957,25 @@ int smu_set_df_cstate(struct smu_context *smu,
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-       int ret = 0;
-       struct smu_table_context *smu_table = &smu->smu_table;
-       struct smu_table *table = NULL;
+       void *watermarks_table = smu->smu_table.watermarks_table;
 
-       table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
-       if (!table->cpu_addr)
+       if (!watermarks_table)
                return -EINVAL;
 
-       ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+       return smu_update_table(smu,
+                               SMU_TABLE_WATERMARKS,
+                               0,
+                               watermarks_table,
                                true);
-
-       return ret;
 }
 
 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-       struct smu_table *watermarks;
-       void *table;
+       void *table = smu->smu_table.watermarks_table;
 
-       if (!smu->smu_table.tables)
-               return 0;
-
-       watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
-       table = watermarks->cpu_addr;
+       if (!table)
+               return -EINVAL;
 
        mutex_lock(&smu->mutex);