asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list
authorNirmoy Das <nirmoy.das@amd.com>
Fri, 6 Dec 2019 15:55:49 +0000 (16:55 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 18 Dec 2019 21:09:12 +0000 (16:09 -0500)
drm_sched_entity_init() now takes a list of drm gpu schedulers instead of
a list of drm_sched_rq entries. This makes the conversion from a
drm_sched_rq list to a drm gpu scheduler list unnecessary.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

index cc4ef4db90e5a26aa4d2b3242c9450e5ed9e43e2..db91663df4f6c026292146a5d91ddd27734a9ad3 100644 (file)
@@ -2786,7 +2786,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->mman.buffer_funcs = NULL;
        adev->mman.buffer_funcs_ring = NULL;
        adev->vm_manager.vm_pte_funcs = NULL;
-       adev->vm_manager.vm_pte_num_rqs = 0;
+       adev->vm_manager.vm_pte_num_scheds = 0;
        adev->gmc.gmc_funcs = NULL;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
index a0be1d2f02dcecdcb00ff2340961ca5bb833bde6..4dc75eda1d91b2d1fda76ff872cfd655a594d7f2 100644 (file)
@@ -2740,7 +2740,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *root;
-       struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
        int r, i;
 
        vm->va = RB_ROOT_CACHED;
@@ -2754,19 +2753,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        spin_lock_init(&vm->invalidated_lock);
        INIT_LIST_HEAD(&vm->freed);
 
-       for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-               sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
        /* create scheduler entities for page table updates */
        r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
-                                 sched_list, adev->vm_manager.vm_pte_num_rqs,
-                                 NULL);
+                                 adev->vm_manager.vm_pte_scheds,
+                                 adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
                return r;
 
        r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
-                                 sched_list, adev->vm_manager.vm_pte_num_rqs,
-                                 NULL);
+                                 adev->vm_manager.vm_pte_scheds,
+                                 adev->vm_manager.vm_pte_num_scheds, NULL);
        if (r)
                goto error_free_direct;
 
index 7e0eb36da27deaae5e36d7810f0277d60e184c43..fade4f45320c63d7745a1258fbcee2d7867ebeaf 100644 (file)
@@ -327,8 +327,8 @@ struct amdgpu_vm_manager {
        u64                                     vram_base_offset;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
-       struct drm_sched_rq                     *vm_pte_rqs[AMDGPU_MAX_RINGS];
-       unsigned                                vm_pte_num_rqs;
+       struct drm_gpu_scheduler                *vm_pte_scheds[AMDGPU_MAX_RINGS];
+       unsigned                                vm_pte_num_scheds;
        struct amdgpu_ring                      *page_fault;
 
        /* partial resident texture handling */
index c45304f1047c537b5e93a86b1f6697131ff3f922..b79e8ec4bde29b8e0854a4581e76672727fc07bd 100644 (file)
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-       struct drm_gpu_scheduler *sched;
        unsigned i;
 
        adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_rqs[i] =
-                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               adev->vm_manager.vm_pte_scheds[i] =
+                       &adev->sdma.instance[i].ring.sched;
        }
-       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
index a101758380130907538ff13a533b6a2a5a190293..45e1428d42f808e33cc23e6d6d6d89ff62399898 100644 (file)
@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-       struct drm_gpu_scheduler *sched;
        unsigned i;
 
        adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_rqs[i] =
-                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               adev->vm_manager.vm_pte_scheds[i] =
+                       &adev->sdma.instance[i].ring.sched;
        }
-       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
index 5f4e2c616241fc6b5e0fbf6aaf173720a698af09..5f0d2469924d38be26fc6877a383fbcacb9bf0a2 100644 (file)
@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-       struct drm_gpu_scheduler *sched;
        unsigned i;
 
        adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_rqs[i] =
-                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               adev->vm_manager.vm_pte_scheds[i] =
+                        &adev->sdma.instance[i].ring.sched;
        }
-       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
index 4ef4d31f52318086961e6637e9a32ece34778f0b..492bca89b3e9d5f73a22abcd20709729edb84937 100644 (file)
@@ -2409,10 +2409,9 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
                        sched = &adev->sdma.instance[i].page.sched;
                else
                        sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_rqs[i] =
-                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               adev->vm_manager.vm_pte_scheds[i] = sched;
        }
-       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
index 1d5b47162979b1b07ae57d6ef880c30e0e701f4a..1243cfefa2a5ddf70890b13e9926a2ee0eb46ac4 100644 (file)
@@ -1711,17 +1711,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
 
 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-       struct drm_gpu_scheduler *sched;
        unsigned i;
 
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
                for (i = 0; i < adev->sdma.num_instances; i++) {
-                       sched = &adev->sdma.instance[i].ring.sched;
-                       adev->vm_manager.vm_pte_rqs[i] =
-                               &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+                       adev->vm_manager.vm_pte_scheds[i] =
+                               &adev->sdma.instance[i].ring.sched;
                }
-               adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+               adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
        }
 }
 
index bdda8b4e03f08c01a221061307272ff27d988c53..9aac9f9c50bb60cbde0a87744a18b3849151d9df 100644 (file)
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
 
 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-       struct drm_gpu_scheduler *sched;
        unsigned i;
 
        adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               sched = &adev->sdma.instance[i].ring.sched;
-               adev->vm_manager.vm_pte_rqs[i] =
-                       &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               adev->vm_manager.vm_pte_scheds[i] =
+                       &adev->sdma.instance[i].ring.sched;
        }
-       adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+       adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version si_dma_ip_block =