asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/amdgpu: preserve RSMU UMC index mode state
author: John Clements <john.clements@amd.com>
Fri, 10 Jan 2020 09:38:31 +0000 (17:38 +0800)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 14 Jan 2020 15:18:10 +0000 (10:18 -0500)
Between UMC RAS error register accesses, restore the previous RSMU UMC index mode state.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: John Clements <john.clements@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c

index 11428b66c74e246065cdfc721974fbc9d505d7e1..793bf70e64b1931af561d6d74e1d74925d9cdcf9 100644 (file)
@@ -54,12 +54,30 @@ const uint32_t
                {9, 25, 0, 16},         {15, 31, 6, 22}
 };
 
+static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
+{
+       WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+                       RSMU_UMC_INDEX_MODE_EN, 1);
+}
+
 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 {
        WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
                        RSMU_UMC_INDEX_MODE_EN, 0);
 }
 
+static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
+{
+       uint32_t rsmu_umc_index;
+
+       rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+                       mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+
+       return REG_GET_FIELD(rsmu_umc_index,
+                       RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+                       RSMU_UMC_INDEX_MODE_EN);
+}
+
 static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
                                            uint32_t umc_inst,
                                            uint32_t ch_inst)
@@ -163,6 +181,11 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
        uint32_t ch_inst         = 0;
        uint32_t umc_reg_offset  = 0;
 
+       uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_disable_umc_index_mode(adev);
+
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
                umc_reg_offset = get_umc_6_reg_offset(adev,
                                                      umc_inst,
@@ -175,6 +198,9 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
                                                          umc_reg_offset,
                                                          &(err_data->ue_count));
        }
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_enable_umc_index_mode(adev);
 }
 
 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
@@ -216,8 +242,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-               err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 
+               err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
                /* the lowest lsb bits should be ignored */
                lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
                err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -257,6 +283,11 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
        uint32_t ch_inst         = 0;
        uint32_t umc_reg_offset  = 0;
 
+       uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_disable_umc_index_mode(adev);
+
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
                umc_reg_offset = get_umc_6_reg_offset(adev,
                                                      umc_inst,
@@ -269,6 +300,8 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
                                             umc_inst);
        }
 
+       if (rsmu_umc_index_state)
+               umc_v6_1_enable_umc_index_mode(adev);
 }
 
 static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
@@ -315,7 +348,10 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
        uint32_t ch_inst         = 0;
        uint32_t umc_reg_offset  = 0;
 
-       umc_v6_1_disable_umc_index_mode(adev);
+       uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_disable_umc_index_mode(adev);
 
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
                umc_reg_offset = get_umc_6_reg_offset(adev,
@@ -324,6 +360,9 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
 
                umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
        }
+
+       if (rsmu_umc_index_state)
+               umc_v6_1_enable_umc_index_mode(adev);
 }
 
 const struct amdgpu_umc_funcs umc_v6_1_funcs = {