drm/amdgpu: correctly sign extend 48bit addresses v3
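On hardware with a 48-bit virtual address space, bit 47 acts as a sign bit: addresses in the upper canonical half must be sign-extended into 64-bit form before the hardware sees them, and the extension must be stripped again before an address is compared against the VM mappings. The standalone sketch below shows the round trip; the three constants match the definitions this series adds to amdgpu_gmc.h, and sign_extend() mirrors the new amdgpu_gmc_sign_extend() helper.

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Constants as added to amdgpu_gmc.h by this series. */
        #define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
        #define AMDGPU_GMC_HOLE_END   0xffff800000000000ULL
        #define AMDGPU_GMC_HOLE_MASK  0x0000ffffffffffffULL

        /* Mirrors amdgpu_gmc_sign_extend(): an address in the upper
         * canonical half gets bits 63..48 set before it is handed to
         * the hardware. */
        static uint64_t sign_extend(uint64_t addr)
        {
                if (addr >= AMDGPU_GMC_HOLE_START)
                        addr |= AMDGPU_GMC_HOLE_END;
                return addr;
        }

        int main(void)
        {
                uint64_t va = 0x0000f00000000000ULL;    /* bit 47 set */
                uint64_t ext = sign_extend(va);

                printf("extended = 0x%016" PRIx64 "\n", ext);
                /* Masking strips the extension, recovering the 48-bit VA. */
                printf("masked   = 0x%016" PRIx64 "\n",
                       ext & AMDGPU_GMC_HOLE_MASK);
                return 0;
        }

The same extension must be applied to the page directory address programmed into the hardware, which is why the second hunk below replaces the raw amdgpu_bo_gpu_offset() with the new amdgpu_gmc_pd_addr() helper.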
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5b70a30967ec1965b05921c435a602876e279053..135d9d8c95067269e586d49b41fc99b63dfa7473 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -835,7 +835,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                                continue;
 
-                       va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+                       va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                        if (r) {
                                DRM_ERROR("IB va_start is invalid\n");
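User space may hand in a va_start that is already sign-extended, while the VM tree is keyed by the plain 48-bit addresses, so the lookup has to mask off bits 63..48 first. A toy stand-in for amdgpu_cs_find_mapping() demonstrating the miss (the interval check and the values are illustrative only):

        #include <stdint.h>
        #include <stdio.h>

        #define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL

        /* Toy interval check standing in for the VM mapping lookup:
         * mappings are stored with un-extended 48-bit addresses. */
        static int in_mapping(uint64_t addr, uint64_t start, uint64_t end)
        {
                return addr >= start && addr < end;
        }

        int main(void)
        {
                uint64_t start = 0x0000f00000000000ULL; /* mapping bounds */
                uint64_t end = start + 0x10000;
                uint64_t va = 0xfffff00000000000ULL;    /* sign-extended VA */

                printf("raw    hit: %d\n", in_mapping(va, start, end));
                printf("masked hit: %d\n",
                       in_mapping(va & AMDGPU_GMC_HOLE_MASK, start, end));
                return 0;
        }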
@@ -946,7 +946,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
+       p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
 
        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
@@ -1194,38 +1194,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        uint64_t seq;
        int r;
 
+       job = p->job;
+       p->job = NULL;
+
+       r = drm_sched_job_init(&job->base, entity, p->filp);
+       if (r)
+               goto error_unlock;
+
+       /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = e->robj;
 
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-                       amdgpu_mn_unlock(p->mn);
-                       return -ERESTARTSYS;
+                       r = -ERESTARTSYS;
+                       goto error_abort;
                }
        }
 
-       job = p->job;
-       p->job = NULL;
-
-       r = drm_sched_job_init(&job->base, entity, p->filp);
-       if (r) {
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
-       r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
-       if (r) {
-               dma_fence_put(p->fence);
-               dma_fence_put(&job->base.s_fence->finished);
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
+       amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
        amdgpu_cs_post_dependencies(p);
 
        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
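The submit path is also reordered: drm_sched_job_init() can allocate memory, and allocation may recurse into reclaim, which can contend on the MMU-notifier lock, so the job is now initialized before amdgpu_mn_lock() is taken (the added comment records the rule). Failures inside the locked region unwind through the new labels. A hypothetical standalone skeleton of that shape, with a pthread mutex and all names as stand-ins rather than the kernel API:

        #include <errno.h>
        #include <pthread.h>
        #include <stdlib.h>

        static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;

        /* Skeleton of the reordered submit: anything that may allocate
         * runs before the notifier lock, and errors taken inside the
         * locked region unwind through a label. */
        static int submit(int userptr_invalid)
        {
                void *job;
                int r;

                job = malloc(64);               /* may allocate */
                if (!job)
                        return -ENOMEM;

                /* no memory allocation allowed while holding mn_lock */
                pthread_mutex_lock(&mn_lock);

                if (userptr_invalid) {          /* userptr pages changed */
                        r = -EINTR;
                        goto error_abort;
                }

                /* ... publish fence, flip buffers ... */
                pthread_mutex_unlock(&mn_lock);
                free(job);
                return 0;

        error_abort:
                pthread_mutex_unlock(&mn_lock);
                free(job);
                return r;
        }

        int main(void)
        {
                return submit(0);
        }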
@@ -1247,10 +1237,21 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);
 
+       amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
+
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        amdgpu_mn_unlock(p->mn);
 
        return 0;
+
+error_abort:
+       dma_fence_put(&job->base.s_fence->finished);
+       job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+       amdgpu_job_free(job);
+       return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
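In the error_abort path above, the scheduler-fence reference obtained from drm_sched_job_init() is dropped and job->base.s_fence is cleared before execution falls through to amdgpu_job_free(), so the shared error_unlock path cannot drop the same reference a second time. A toy refcount model of that two-step teardown (all names illustrative, not the kernel objects):

        #include <stdio.h>
        #include <stdlib.h>

        struct fence { int refcount; };

        static void fence_put(struct fence *f)
        {
                if (f && --f->refcount == 0) {
                        printf("fence freed\n");
                        free(f);
                }
        }

        struct job { struct fence *s_fence; };

        static void job_free(struct job *j)
        {
                fence_put(j->s_fence); /* harmless once s_fence is NULL */
                free(j);
        }

        int main(void)
        {
                struct job *j = malloc(sizeof(*j));

                j->s_fence = malloc(sizeof(*j->s_fence));
                j->s_fence->refcount = 1;

                /* error_abort: drop our reference, clear the pointer ... */
                fence_put(j->s_fence);
                j->s_fence = NULL;

                /* error_unlock: ... so the free cannot double-put it. */
                job_free(j);
                return 0;
        }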