asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/amdgpu: XGMI pstate switch initial support
author shaoyunl <shaoyun.liu@amd.com>
Wed, 20 Mar 2019 20:14:56 +0000 (16:14 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 28 Mar 2019 03:40:43 +0000 (22:40 -0500)
The driver votes for the low-to-high pstate switch whenever there is an
outstanding XGMI mapping request, and votes for the high-to-low switch once
all outstanding XGMI mappings have been terminated.

Signed-off-by: shaoyunl <shaoyun.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
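
In short, the patch implements a mutex-protected reference count: the first
outstanding XGMI mapping votes the pstate up, and removing the last mapping
votes it back down. The sketch below is a minimal user-space illustration of
that counting pattern, not the driver code itself; the names xgmi_vote(),
xgmi_unvote() and hw_set_pstate() are hypothetical stand-ins for the amdgpu
hunks that follow.

/*
 * Sketch only: models adev->vm_manager.xgmi_map_counter guarded by
 * lock_pstate, with hw_set_pstate() standing in for amdgpu_xgmi_set_pstate().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock_pstate = PTHREAD_MUTEX_INITIALIZER;
static uint32_t xgmi_map_counter;

static void hw_set_pstate(int pstate)
{
	/* stand-in for the SMU request the real driver would issue */
	printf("XGMI pstate -> %s\n", pstate ? "high" : "low");
}

/* a mapping over XGMI is created (cf. amdgpu_vm_bo_add below) */
static void xgmi_vote(void)
{
	pthread_mutex_lock(&lock_pstate);
	if (++xgmi_map_counter == 1)	/* first mapping: raise pstate */
		hw_set_pstate(1);
	pthread_mutex_unlock(&lock_pstate);
}

/* a mapping over XGMI is removed (cf. amdgpu_vm_bo_rmv below) */
static void xgmi_unvote(void)
{
	pthread_mutex_lock(&lock_pstate);
	if (--xgmi_map_counter == 0)	/* last mapping gone: lower pstate */
		hw_set_pstate(0);
	pthread_mutex_unlock(&lock_pstate);
}

int main(void)
{
	xgmi_vote();	/* 0 -> 1: prints "XGMI pstate -> high" */
	xgmi_vote();	/* 1 -> 2: no transition */
	xgmi_unvote();	/* 2 -> 1: no transition */
	xgmi_unvote();	/* 1 -> 0: prints "XGMI pstate -> low" */
	return 0;
}

Only the first and last mapping trigger a hardware transition, which is the
same behaviour the xgmi_map_counter checks in the amdgpu_vm.c hunks implement.
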
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h

index 865e067b1b5bf3dd999ffe20a812d239b2e55cca..7cee269ec3e3d54cc0d1716cd93201f4c6101560 100644 (file)
@@ -2018,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
        r = amdgpu_device_enable_mgpu_fan_boost();
        if (r)
                DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+       /* set to low pstate by default */
+       amdgpu_xgmi_set_pstate(adev, 0);
+
 }
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
index 220a6a7b1bc155f93880ef12b914e6a5aa92eb4d..c430e82590387872a51ec1a3b1cd156bcdf45cf8 100644 (file)
@@ -72,6 +72,8 @@ struct amdgpu_bo_va {
 
        /* If the mappings are cleared or filled */
        bool                            cleared;
+
+       bool                            is_xgmi;
 };
 
 struct amdgpu_bo {
index b7f937ceebee723a9b1b2853dfd16b2ec85abd42..a5c6a1e5fe74545b14c9871fc64a5de82f34a542 100644 (file)
@@ -34,6 +34,7 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
 
 /**
  * DOC: GPUVM
@@ -2045,6 +2046,15 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
 
+       if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) {
+               bo_va->is_xgmi = true;
+               mutex_lock(&adev->vm_manager.lock_pstate);
+               /* Power up XGMI if it can be potentially used */
+               if (++adev->vm_manager.xgmi_map_counter == 1)
+                       amdgpu_xgmi_set_pstate(adev, 1);
+               mutex_unlock(&adev->vm_manager.lock_pstate);
+       }
+
        return bo_va;
 }
 
@@ -2463,6 +2473,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
        }
 
        dma_fence_put(bo_va->last_pt_update);
+
+       if (bo && bo_va->is_xgmi) {
+               mutex_lock(&adev->vm_manager.lock_pstate);
+               if (--adev->vm_manager.xgmi_map_counter == 0)
+                       amdgpu_xgmi_set_pstate(adev, 0);
+               mutex_unlock(&adev->vm_manager.lock_pstate);
+       }
+
        kfree(bo_va);
 }
 
@@ -2970,6 +2988,9 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 
        idr_init(&adev->vm_manager.pasid_idr);
        spin_lock_init(&adev->vm_manager.pasid_lock);
+
+       adev->vm_manager.xgmi_map_counter = 0;
+       mutex_init(&adev->vm_manager.lock_pstate);
 }
 
 /**
index 520122be798b2615cbcaabc9c97d0029ba0915c8..f586b38f30760acfa3b5a58d312651813dba21e2 100644 (file)
@@ -324,6 +324,10 @@ struct amdgpu_vm_manager {
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;
+
+       /* counter of memory mapped through XGMI */
+       uint32_t                                xgmi_map_counter;
+       struct mutex                            lock_pstate;
 };
 
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
index fcc4b05c745cb4c1e02f3b499b5c072f0325886b..336834797af3d9e20bb656730e0ff0c5d6ca57cd 100644 (file)
@@ -200,12 +200,26 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 
        if (lock)
                mutex_lock(&tmp->hive_lock);
-
+       tmp->pstate = -1;
        mutex_unlock(&xgmi_mutex);
 
        return tmp;
 }
 
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+       int ret = 0;
+       struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+       if (!hive)
+               return 0;
+
+       if (hive->pstate == pstate)
+               return 0;
+       /* TODO: send the message to SMU for pstate change */
+       return ret;
+}
+
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
 {
        int ret = -EINVAL;
index 24a3b0362f9892ff4614af5c5485bcb522f094bb..3e9c91e9a4bf9b1549a7e1e7529bb75a1fbf0da5 100644 (file)
@@ -33,11 +33,21 @@ struct amdgpu_hive_info {
        struct kobject *kobj;
        struct device_attribute dev_attr;
        struct amdgpu_device *adev;
+       int pstate; /* 0 -- low, 1 -- high, -1 -- unknown */
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
 void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+               struct amdgpu_device *bo_adev)
+{
+       return (adev != bo_adev &&
+               adev->gmc.xgmi.hive_id &&
+               adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
 
 #endif