]> asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/amd/display: hook navi10 pplib functions
author: hersen wu <hersenxs.wu@amd.com>
    Thu, 23 May 2019 17:23:25 +0000 (13:23 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
    Sat, 22 Jun 2019 14:34:07 +0000 (09:34 -0500)
During bring-up, before the Windows dc-pplib interface
design, Linux DC used the Raven dc-pplib interface.
Now the navi10 dc-pplib-smu interface has been changed and verified
under Windows, so navi10 needs its own specific dc-pplib-smu
interface. TODO: hook set_hard_min_uclk_by_freq,
get_maximum_sustainable_clocks

Signed-off-by: hersen wu <hersenxs.wu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Roman Li <Roman.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c

index f78ca2be664ff36460225875c64900f43b9784a6..4d4531c18c1497130ff2befd4f5c6066c01c4548 100644 (file)
@@ -647,16 +647,279 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
 }
 
+/* Convert DC watermark range sets into the dm_pp soc15 layout and pass
+ * them to the SMU.
+ *
+ * Returns PP_SMU_RESULT_UNSUPPORTED when no SMU function table is
+ * registered, PP_SMU_RESULT_FAIL when the SMU rejects the ranges,
+ * PP_SMU_RESULT_OK otherwise.
+ */
+enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
+               struct pp_smu_wm_range_sets *ranges)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+       struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+                       wm_with_clock_ranges.wm_dmif_clocks_ranges;
+       struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+                       wm_with_clock_ranges.wm_mcif_clocks_ranges;
+       int32_t i;
+
+       /* Bail out before doing any conversion work if there is no SMU. */
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+       wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+       for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+               /* Out-of-range watermark instances fall back to set A. */
+               if (ranges->reader_wm_sets[i].wm_inst > 3)
+                       wm_dce_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_dce_clocks[i].wm_set_id =
+                                       ranges->reader_wm_sets[i].wm_inst;
+               wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+                       ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+                       ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+                       ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+                       ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
+       }
+
+       for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+               /* Out-of-range watermark instances fall back to set A. */
+               if (ranges->writer_wm_sets[i].wm_inst > 3)
+                       wm_soc_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_soc_clocks[i].wm_set_id =
+                                       ranges->writer_wm_sets[i].wm_inst;
+               wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+                       ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+                       ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+                       ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+                       ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
+       }
+
+       /* smu_set_watermarks_for_clock_ranges() returns 0 on success (or
+        * when set_watermarks_for_clock_ranges is NULL), nonzero on failure.
+        * Report failure as FAIL, consistent with the other pp_nv_* hooks.
+        */
+       if (smu_set_watermarks_for_clock_ranges(&adev->smu,
+                       &wm_with_clock_ranges))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Enable the PME workaround (Azalia D3 PME) through the SMU. */
+enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
+{
+       const struct dc_context *dc_ctx = pp->dm;
+       struct amdgpu_device *adev = dc_ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->funcs == NULL)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* Zero means success (or that set_azalia_d3_pme is unimplemented). */
+       if (smu_set_azalia_d3_pme(smu) != 0)
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Report the number of active displays to the SMU. */
+enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
+{
+       const struct dc_context *dc_ctx = pp->dm;
+       struct amdgpu_device *adev = dc_ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->funcs == NULL)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* Zero means success (or that set_display_count is unimplemented). */
+       if (smu_set_display_count(smu, count) != 0)
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Set the minimum deep-sleep DCEF clock (in MHz) via the SMU. */
+enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *dc_ctx = pp->dm;
+       struct amdgpu_device *adev = dc_ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->funcs == NULL)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* Zero means success (or set_deep_sleep_dcefclk is unimplemented). */
+       if (smu_set_deep_sleep_dcefclk(smu, mhz) != 0)
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Request a hard minimum DCEF clock of @mhz MHz from the SMU. */
+enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
+               struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       /* Zero-initialize so no indeterminate fields reach the SMU. */
+       struct pp_display_clock_request clock_req = {0};
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       clock_req.clock_type = amd_pp_dcef_clock;
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+        * 1: fail
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Request a hard minimum memory (UCLK) clock of @mhz MHz from the SMU. */
+enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       /* Zero-initialize so no indeterminate fields reach the SMU. */
+       struct pp_display_clock_request clock_req = {0};
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       clock_req.clock_type = amd_pp_mem_clock;
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+        * 1: fail
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Request a display/phy/pixel clock of @mhz MHz for the given clock id.
+ *
+ * Returns PP_SMU_RESULT_UNSUPPORTED when there is no SMU or the clock id
+ * is not one this hook handles, PP_SMU_RESULT_FAIL when the SMU rejects
+ * the request, PP_SMU_RESULT_OK otherwise.
+ */
+enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
+               enum pp_smu_nv_clock_id clock_id, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct pp_display_clock_request clock_req = {0};
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       switch (clock_id) {
+       case PP_SMU_NV_DISPCLK:
+               clock_req.clock_type = amd_pp_disp_clock;
+               break;
+       case PP_SMU_NV_PHYCLK:
+               clock_req.clock_type = amd_pp_phy_clock;
+               break;
+       case PP_SMU_NV_PIXELCLK:
+               clock_req.clock_type = amd_pp_pixel_clock;
+               break;
+       default:
+               /* Previously fell through with clock_req.clock_type left
+                * uninitialized and issued a garbage request; reject
+                * unknown clock ids instead.
+                */
+               return PP_SMU_RESULT_UNSUPPORTED;
+       }
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+        * 1: fail
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
+/* Query the SMU for the maximum sustainable clock table. */
+enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+               struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
+{
+       const struct dc_context *dc_ctx = pp->dm;
+       struct amdgpu_device *adev = dc_ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->funcs == NULL ||
+           smu->funcs->get_max_sustainable_clocks_by_dc == NULL)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* The callback returns 0 on success. */
+       if (smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks) == 0)
+               return PP_SMU_RESULT_OK;
+
+       return PP_SMU_RESULT_FAIL;
+}
+
+/* Fetch the supported UCLK DPM state frequencies (in kHz) from the SMU. */
+enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+               unsigned int *clock_values_in_khz, unsigned int *num_states)
+{
+       const struct dc_context *dc_ctx = pp->dm;
+       struct amdgpu_device *adev = dc_ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->ppt_funcs == NULL ||
+           smu->ppt_funcs->get_uclk_dpm_states == NULL)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* The callback returns 0 on success. */
+       if (smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz,
+                       num_states) == 0)
+               return PP_SMU_RESULT_OK;
+
+       return PP_SMU_RESULT_FAIL;
+}
+
 void dm_pp_get_funcs(
                struct dc_context *ctx,
                struct pp_smu_funcs *funcs)
 {
-       funcs->rv_funcs.pp_smu.dm = ctx;
-       funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
-       funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
-       funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
-       funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
-       funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
-       funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
+       /* Pick the dc-pplib-smu hook table matching the ASIC's DCN version. */
+       switch (ctx->dce_version) {
+       case DCN_VERSION_1_0:
+       case DCN_VERSION_1_01:
+               funcs->ctx.ver = PP_SMU_VER_RV;
+               funcs->rv_funcs.pp_smu.dm = ctx;
+               funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+               funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+               funcs->rv_funcs.set_display_count =
+                               pp_rv_set_active_display_count;
+               funcs->rv_funcs.set_min_deep_sleep_dcfclk =
+                               pp_rv_set_min_deep_sleep_dcfclk;
+               funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
+                               pp_rv_set_hard_min_dcefclk_by_freq;
+               funcs->rv_funcs.set_hard_min_fclk_by_freq =
+                               pp_rv_set_hard_min_fclk_by_freq;
+               break;
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+       case DCN_VERSION_2_0:
+               funcs->ctx.ver = PP_SMU_VER_NV;
+               funcs->nv_funcs.pp_smu.dm = ctx;
+               funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
+               funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
+                               pp_nv_set_hard_min_dcefclk_by_freq;
+               funcs->nv_funcs.set_min_deep_sleep_dcfclk =
+                               pp_nv_set_min_deep_sleep_dcfclk;
+               funcs->nv_funcs.set_voltage_by_freq =
+                               pp_nv_set_voltage_by_freq;
+               funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;
+
+               /* todo: set_pme_wa_enable causes 4k@60Hz displays to not light up */
+               funcs->nv_funcs.set_pme_wa_enable = NULL;
+               /* todo: debug warning message */
+               funcs->nv_funcs.set_hard_min_uclk_by_freq = NULL;
+               /* todo: compare data with the Windows driver */
+               funcs->nv_funcs.get_maximum_sustainable_clocks = NULL;
+               /* todo: compare data with the Windows driver */
+               funcs->nv_funcs.get_uclk_dpm_states = NULL;
+               break;
+#endif
+       default:
+               DRM_ERROR("smu version is not supported !\n");
+               break;
+       }
 }
-