drm/amdgpu: keep copy of VRAM lost counter in job
author    Christian König <christian.koenig@amd.com>
          Mon, 9 Oct 2017 13:04:41 +0000 (15:04 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 19 Oct 2017 19:27:03 +0000 (15:27 -0400)
Instead of reading the current counter from fpriv.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
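
For readers skimming the diff below, here is a minimal user-space sketch of the pattern the patch introduces, written with simplified stand-in structs and names (device_state, job_state, job_init, job_run are hypothetical, not the real amdgpu types): the job snapshots the device's VRAM-lost counter when it is created, and the run path compares that snapshot against the current counter instead of going through fpriv.

/* Sketch only: stand-in types, not the amdgpu driver code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct device_state {                   /* stand-in for struct amdgpu_device */
        atomic_uint vram_lost_counter;
};

struct job_state {                      /* stand-in for struct amdgpu_job */
        struct device_state *dev;
        uint32_t vram_lost_counter;     /* snapshot taken at job creation */
};

static void job_init(struct job_state *job, struct device_state *dev)
{
        job->dev = dev;
        /* mirrors (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); */
        job->vram_lost_counter = atomic_load(&dev->vram_lost_counter);
}

static void job_run(struct job_state *job)
{
        /* skip the work when VRAM was lost after the job was created */
        if (job->vram_lost_counter != atomic_load(&job->dev->vram_lost_counter)) {
                fprintf(stderr, "Skip scheduling IBs!\n");
                return;
        }
        printf("scheduling IBs\n");
}

int main(void)
{
        struct device_state dev = { .vram_lost_counter = 0 };
        struct job_state job;

        job_init(&job, &dev);
        job_run(&job);                                /* runs normally */

        atomic_fetch_add(&dev.vram_lost_counter, 1);  /* simulate a reset that lost VRAM */
        job_run(&job);                                /* now skipped */
        return 0;
}

The real patch does the same thing with the amdgpu structures: the snapshot is taken in amdgpu_job_alloc() (and set from fpriv in amdgpu_cs_parser_init()), and amdgpu_job_run() compares it against adev->vram_lost_counter before scheduling the IBs.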

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 76033e2cdba8addb8df37715533a22def00c42ac..aa70f8c045b1ed65eb904b210e237b6955bf9b0b 100644
@@ -1125,6 +1125,7 @@ struct amdgpu_job {
        uint32_t                gds_base, gds_size;
        uint32_t                gws_base, gws_size;
        uint32_t                oa_base, oa_size;
+       uint32_t                vram_lost_counter;
 
        /* user fence handling */
        uint64_t                uf_addr;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0c07df72743c5146c305fe4de1853daf49857952..9daa7cac0ffbadb7d9cf3b2b35a4dfbcb7ab2b11 100644
@@ -172,6 +172,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (ret)
                goto free_all_kdata;
 
+       p->job->vram_lost_counter = fpriv->vram_lost_counter;
+
        if (p->uf_entry.robj)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 83d13431cbdd1093b35267ab21a3fc3a8b8b9f3c..4f2b5acc8743f09f451f5d04ec4fabe9c686453c 100644
@@ -61,6 +61,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
+       (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 
        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->dep_sync);
@@ -180,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_device *adev;
        struct amdgpu_job *job;
-       struct amdgpu_fpriv *fpriv = NULL;
        int r;
 
        if (!sched_job) {
@@ -189,17 +190,17 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
                return NULL;
        }
        job = to_amdgpu_job(sched_job);
+       adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
-       if (job->vm)
-               fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
        /* skip ib schedule when vram is lost */
-       if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
                DRM_ERROR("Skip scheduling IBs!\n");
-       else {
-               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+       } else {
+               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+                                      &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }