drm/amdgpu: individualize fence allocation per entity
author     Nirmoy Das <nirmoy.das@amd.com>
           Tue, 21 Jan 2020 12:29:20 +0000 (13:29 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 22 Jan 2020 21:55:27 +0000 (16:55 -0500)
Allocate fences for each entity and remove ctx->fences reference as
fences should be bound to amdgpu_ctx_entity instead of amdgpu_ctx.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
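
The gist of the change, as a minimal sketch rather than the driver code itself (ctx_entity, ctx_entity_init and ctx_entity_fini below are simplified stand-ins, and dma_fence is reduced to an opaque type): each entity now owns a private ring of amdgpu_sched_jobs fence slots, allocated and freed together with the entity instead of through one context-wide array.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct dma_fence;                              /* opaque stand-in */

struct ctx_entity {
        uint64_t          sequence;            /* next sequence number handed out  */
        struct dma_fence  **fences;            /* private ring, one slot per job   */
};

/* Per-entity allocation; previously a single ctx-wide kcalloc() covered
 * amdgpu_sched_jobs * num_entities slots for all entities at once. */
static int ctx_entity_init(struct ctx_entity *entity, unsigned int sched_jobs)
{
        entity->sequence = 1;
        entity->fences = calloc(sched_jobs, sizeof(*entity->fences));
        return entity->fences ? 0 : -ENOMEM;
}

/* Teardown: each entity frees only its own slots. */
static void ctx_entity_fini(struct ctx_entity *entity)
{
        free(entity->fences);
        entity->fences = NULL;
}

int main(void)
{
        struct ctx_entity entity;

        if (ctx_entity_init(&entity, 16))      /* e.g. amdgpu_sched_jobs == 16 */
                return 1;
        ctx_entity_fini(&entity);
        return 0;
}

The diff below applies the same idea in amdgpu_ctx_init() and amdgpu_ctx_fini(), using kcalloc()/kfree() and unwinding the per-entity allocations under the new error_cleanup_memory label.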

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 64e2babbc36efad282ae49f0f19764496ffa082f..05c2af61e7de93aff6004355597df30bc5ad008f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -87,24 +87,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
 
-       ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
-                             sizeof(struct dma_fence*), GFP_KERNEL);
-       if (!ctx->fences)
-               return -ENOMEM;
 
        ctx->entities[0] = kcalloc(num_entities,
                                   sizeof(struct amdgpu_ctx_entity),
                                   GFP_KERNEL);
-       if (!ctx->entities[0]) {
-               r = -ENOMEM;
-               goto error_free_fences;
-       }
+       if (!ctx->entities[0])
+               return -ENOMEM;
+
 
        for (i = 0; i < num_entities; ++i) {
                struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
 
                entity->sequence = 1;
-               entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
+               entity->fences = kcalloc(amdgpu_sched_jobs,
+                                        sizeof(struct dma_fence*), GFP_KERNEL);
+               if (!entity->fences) {
+                       r = -ENOMEM;
+                       goto error_cleanup_memory;
+               }
        }
        for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
                ctx->entities[i] = ctx->entities[i - 1] +
@@ -181,11 +181,17 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 error_cleanup_entities:
        for (i = 0; i < num_entities; ++i)
                drm_sched_entity_destroy(&ctx->entities[0][i].entity);
-       kfree(ctx->entities[0]);
 
-error_free_fences:
-       kfree(ctx->fences);
-       ctx->fences = NULL;
+error_cleanup_memory:
+       for (i = 0; i < num_entities; ++i) {
+               struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+               kfree(entity->fences);
+               entity->fences = NULL;
+       }
+
+       kfree(ctx->entities[0]);
+       ctx->entities[0] = NULL;
        return r;
 }
 
@@ -199,12 +205,16 @@ static void amdgpu_ctx_fini(struct kref *ref)
        if (!adev)
                return;
 
-       for (i = 0; i < num_entities; ++i)
+       for (i = 0; i < num_entities; ++i) {
+               struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
                for (j = 0; j < amdgpu_sched_jobs; ++j)
-                       dma_fence_put(ctx->entities[0][i].fences[j]);
-       kfree(ctx->fences);
-       kfree(ctx->entities[0]);
+                       dma_fence_put(entity->fences[j]);
 
+               kfree(entity->fences);
+       }
+
+       kfree(ctx->entities[0]);
        mutex_destroy(&ctx->lock);
 
        kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 4ad90a44dc3cd36ffbee9361af04283366c7dee1..a6cd9d4b078c3c61e8606be2d32b29a794d20fec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -42,7 +42,6 @@ struct amdgpu_ctx {
        unsigned                        reset_counter_query;
        uint32_t                        vram_lost_counter;
        spinlock_t                      ring_lock;
-       struct dma_fence                **fences;
        struct amdgpu_ctx_entity        *entities[AMDGPU_HW_IP_NUM];
        bool                            preamble_presented;
        enum drm_sched_priority         init_priority;