asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/amdgpu: attempt xgmi perfmon re-arm on failed arm
author: Jonathan Kim <jonathan.kim@amd.com>
    Mon, 16 Dec 2019 17:31:57 +0000 (12:31 -0500)
committer: Alex Deucher <alexander.deucher@amd.com>
    Mon, 23 Dec 2019 19:56:02 +0000 (14:56 -0500)
The DF routines that arm the xGMI performance counters will attempt to
re-arm, on both performance monitoring start and read, if the initial
attempt to arm fails.

Signed-off-by: Jonathan Kim <Jonathan.Kim@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/df_v3_6.c

index 4043ebcea5de6e1efca0a89dad8356e5c5de385e..2f884d941e8d369b301617288cf9d1406edd12cd 100644 (file)
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+/*
+ * Same as df_v3_6_perfmon_wreg() but verify the writes took effect by
+ * reading both registers back under the same pcie_idx_lock hold.
+ *
+ * Returns 0 when both readbacks match the written values, or -EBUSY
+ * when either differs so the caller can retry or defer the arm.
+ */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+                                         uint32_t lo_addr, uint32_t lo_val,
+                                         uint32_t hi_addr, uint32_t  hi_val)
+{
+       unsigned long flags, address, data;
+       uint32_t lo_val_rb, hi_val_rb;
+
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       /* write the lo/hi register pair through the PCIE index/data window */
+       WREG32(address, lo_addr);
+       WREG32(data, lo_val);
+       WREG32(address, hi_addr);
+       WREG32(data, hi_val);
+
+       /* read both registers back to confirm the writes landed */
+       WREG32(address, lo_addr);
+       lo_val_rb = RREG32(data);
+       WREG32(address, hi_addr);
+       hi_val_rb = RREG32(data);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+       if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+               return -EBUSY;
+
+       return 0;
+}
+
+
+/*
+ * Retry arming the counters every ARM_RETRY_USEC_INTERVAL usecs within
+ * an ARM_RETRY_USEC_TIMEOUT usec window.
+ *
+ * Returns 0 as soon as one arm attempt succeeds, or -ETIME if every
+ * attempt within the timeout window fails.
+ */
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL        100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+                                         uint32_t lo_addr, uint32_t lo_val,
+                                         uint32_t hi_addr, uint32_t  hi_val)
+{
+       int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+       while (countdown) {
+
+               /* zero return means the readback matched; stop retrying */
+               if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+                                                    hi_addr, hi_val))
+                       break;
+
+               countdown -= ARM_RETRY_USEC_INTERVAL;
+               udelay(ARM_RETRY_USEC_INTERVAL);
+       }
+
+       /* countdown is only still positive if the break above was taken */
+       return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */
 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
        switch (target_cntr) {
 
        case 0:
-               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
                break;
        case 1:
-               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
                break;
        case 2:
-               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
                break;
        case 3:
-               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+               *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+               *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
                break;
 
        }
@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
        return -ENOSPC;
 }
 
+/*
+ * Bit 31 of a counter's df_perfmon_config_assign_mask entry marks an
+ * arm that failed at pmc_start time and must be retried on the next
+ * counter read (see df_v3_6_pmc_is_deferred()).
+ */
+#define DEFERRED_ARM_MASK      (1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+                                   uint64_t config, bool is_deferred)
+{
+       int target_cntr;
+
+       target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+       /* config does not map to an allocated counter slot */
+       if (target_cntr < 0)
+               return -EINVAL;
+
+       if (is_deferred)
+               adev->df_perfmon_config_assign_mask[target_cntr] |=
+                                                       DEFERRED_ARM_MASK;
+       else
+               adev->df_perfmon_config_assign_mask[target_cntr] &=
+                                                       ~DEFERRED_ARM_MASK;
+
+       return 0;
+}
+
+/* check whether the counter mapped to config still has a deferred arm */
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+                                   uint64_t config)
+{
+       int target_cntr;
+
+       target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+       /*
+        * we never get target_cntr < 0 since this function is only called in
+        * pmc_count for now but we should check anyways.
+        */
+       return (target_cntr >= 0 &&
+                       (adev->df_perfmon_config_assign_mask[target_cntr]
+                       & DEFERRED_ARM_MASK));
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
                                     uint64_t config)
@@ -451,29 +544,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                             int is_enable)
 {
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-       int ret = 0;
+       int err = 0, ret = 0;
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
+               if (is_enable)
+                       return df_v3_6_pmc_add_cntr(adev, config);
 
                df_v3_6_reset_perfmon_cntr(adev, config);
 
-               if (is_enable) {
-                       ret = df_v3_6_pmc_add_cntr(adev, config);
-               } else {
-                       ret = df_v3_6_pmc_get_ctrl_settings(adev,
+               ret = df_v3_6_pmc_get_ctrl_settings(adev,
                                        config,
                                        &lo_base_addr,
                                        &hi_base_addr,
                                        &lo_val,
                                        &hi_val);
 
-                       if (ret)
-                               return ret;
+               if (ret)
+                       return ret;
+
+               err = df_v3_6_perfmon_arm_with_retry(adev,
+                                                    lo_base_addr,
+                                                    lo_val,
+                                                    hi_base_addr,
+                                                    hi_val);
 
-                       df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-                                       hi_base_addr, hi_val);
-               }
+               if (err)
+                       ret = df_v3_6_pmc_set_deferred(adev, config, true);
 
                break;
        default:
@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                if (ret)
                        return ret;
 
-               df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+               df_v3_6_reset_perfmon_cntr(adev, config);
 
                if (is_disable)
                        df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
                                  uint64_t config,
                                  uint64_t *count)
 {
-       uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+       uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
        *count = 0;
 
        switch (adev->asic_type) {
        case CHIP_VEGA20:
-
                df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);
 
                if ((lo_base_addr == 0) || (hi_base_addr == 0))
                        return;
 
+               /* rearm the counter or throw away count value on failure */
+               if (df_v3_6_pmc_is_deferred(adev, config)) {
+                       int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+                                                       lo_base_addr, lo_val,
+                                                       hi_base_addr, hi_val);
+
+                       if (rearm_err)
+                               return;
+
+                       df_v3_6_pmc_set_deferred(adev, config, false);
+               }
+
                df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
                                hi_base_addr, &hi_val);
 
@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
                         config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
                break;
-
        default:
                break;
        }