drm/amdgpu: correctly sign extend 48bit addresses v3
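Background for the address handling in the diff below: with a 48-bit GPU virtual address space, bit 47 acts as a sign bit, so a user VA is sign extended into 64-bit canonical form before use and masked back down to its low 48 bits (as the new AMDGPU_GMC_HOLE_MASK use in amdgpu_cs_vm_handling() does) when looking up mappings. The following is a minimal userspace sketch of the idea only; the constant values and helper name are illustrative assumptions, not code taken from this patch (the kernel's own AMDGPU_GMC_HOLE_* definitions live in amdgpu_gmc.h):

    /* Sketch: sign extending a 48-bit GPU VA into 64-bit canonical form. */
    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define GMC_HOLE_MASK   0x0000ffffffffffffULL  /* low 48 bits of a VA */
    #define GMC_HOLE_START  0x0000800000000000ULL  /* first VA with bit 47 set */

    static uint64_t sign_extend_48(uint64_t addr)
    {
            addr &= GMC_HOLE_MASK;          /* keep the 48 significant bits */
            if (addr >= GMC_HOLE_START)     /* bit 47 set -> high canonical half */
                    addr |= ~GMC_HOLE_MASK; /* propagate the sign into bits 48..63 */
            return addr;
    }

    int main(void)
    {
            /* A VA just at the hole sign-extends to the top of the 64-bit space. */
            printf("0x%016" PRIx64 "\n", sign_extend_48(GMC_HOLE_START)); /* 0xffff800000000000 */
            /* A low VA is returned unchanged. */
            printf("0x%016" PRIx64 "\n", sign_extend_48(0x1000));         /* 0x0000000000001000 */
            return 0;
    }
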
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 502b94fb116a7070af89ce51da182c3a936e48a0..135d9d8c95067269e586d49b41fc99b63dfa7473 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -32,6 +32,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_gmc.h"
+#include "amdgpu_gem.h"
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
@@ -803,8 +804,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
        amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
-static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
+static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
@@ -813,6 +815,71 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
        struct amdgpu_bo *bo;
        int r;
 
+       /* Only for UVD/VCE VM emulation */
+       if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
+               unsigned i, j;
+
+               for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+                       struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+                       struct amdgpu_bo_va_mapping *m;
+                       struct amdgpu_bo *aobj = NULL;
+                       struct amdgpu_cs_chunk *chunk;
+                       uint64_t offset, va_start;
+                       struct amdgpu_ib *ib;
+                       uint8_t *kptr;
+
+                       chunk = &p->chunks[i];
+                       ib = &p->job->ibs[j];
+                       chunk_ib = chunk->kdata;
+
+                       if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+                               continue;
+
+                       va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
+                       r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+                       if (r) {
+                               DRM_ERROR("IB va_start is invalid\n");
+                               return r;
+                       }
+
+                       if ((va_start + chunk_ib->ib_bytes) >
+                           (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+                               DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+                               return -EINVAL;
+                       }
+
+                       /* the IB should be reserved at this point */
+                       r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+                       if (r) {
+                               return r;
+                       }
+
+                       offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+                       kptr += va_start - offset;
+
+                       if (ring->funcs->parse_cs) {
+                               memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+                               amdgpu_bo_kunmap(aobj);
+
+                               r = amdgpu_ring_parse_cs(ring, p, j);
+                               if (r)
+                                       return r;
+                       } else {
+                               ib->ptr = (uint32_t *)kptr;
+                               r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+                               amdgpu_bo_kunmap(aobj);
+                               if (r)
+                                       return r;
+                       }
+
+                       j++;
+               }
+       }
+
+       if (!p->job->vm)
+               return amdgpu_cs_sync_rings(p);
+
+
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;
@@ -875,6 +942,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
+       r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+       if (r)
+               return r;
+
+       p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+
        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
@@ -886,90 +959,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
                }
        }
 
-       return r;
-}
-
-static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
-                                struct amdgpu_cs_parser *p)
-{
-       struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       struct amdgpu_vm *vm = &fpriv->vm;
-       struct amdgpu_ring *ring = p->ring;
-       int r;
-
-       /* Only for UVD/VCE VM emulation */
-       if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
-               unsigned i, j;
-
-               for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
-                       struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-                       struct amdgpu_bo_va_mapping *m;
-                       struct amdgpu_bo *aobj = NULL;
-                       struct amdgpu_cs_chunk *chunk;
-                       uint64_t offset, va_start;
-                       struct amdgpu_ib *ib;
-                       uint8_t *kptr;
-
-                       chunk = &p->chunks[i];
-                       ib = &p->job->ibs[j];
-                       chunk_ib = chunk->kdata;
-
-                       if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
-                               continue;
-
-                       va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
-                       r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
-                       if (r) {
-                               DRM_ERROR("IB va_start is invalid\n");
-                               return r;
-                       }
-
-                       if ((va_start + chunk_ib->ib_bytes) >
-                           (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-                               DRM_ERROR("IB va_start+ib_bytes is invalid\n");
-                               return -EINVAL;
-                       }
-
-                       /* the IB should be reserved at this point */
-                       r = amdgpu_bo_kmap(aobj, (void **)&kptr);
-                       if (r) {
-                               return r;
-                       }
-
-                       offset = m->start * AMDGPU_GPU_PAGE_SIZE;
-                       kptr += va_start - offset;
-
-                       if (p->ring->funcs->parse_cs) {
-                               memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-                               amdgpu_bo_kunmap(aobj);
-
-                               r = amdgpu_ring_parse_cs(ring, p, j);
-                               if (r)
-                                       return r;
-                       } else {
-                               ib->ptr = (uint32_t *)kptr;
-                               r = amdgpu_ring_patch_cs_in_place(ring, p, j);
-                               amdgpu_bo_kunmap(aobj);
-                               if (r)
-                                       return r;
-                       }
-
-                       j++;
-               }
-       }
-
-       if (p->job->vm) {
-               p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
-
-               r = amdgpu_bo_vm_update_pte(p);
-               if (r)
-                       return r;
-
-               r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
-               if (r)
-                       return r;
-       }
-
        return amdgpu_cs_sync_rings(p);
 }
 
@@ -978,14 +967,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 {
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
-       int i, j;
        int r, ce_preempt = 0, de_preempt = 0;
+       struct amdgpu_ring *ring;
+       int i, j;
 
        for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
                struct amdgpu_cs_chunk *chunk;
                struct amdgpu_ib *ib;
                struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-               struct amdgpu_ring *ring;
+               struct drm_sched_entity *entity;
 
                chunk = &parser->chunks[i];
                ib = &parser->job->ibs[j];
@@ -1007,27 +997,24 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                                return -EINVAL;
                }
 
-               r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
-                                        chunk_ib->ip_instance, chunk_ib->ring, &ring);
+               r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
+                                         chunk_ib->ip_instance, chunk_ib->ring,
+                                         &entity);
                if (r)
                        return r;
 
-               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-                       parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-                       if (!parser->ctx->preamble_presented) {
-                               parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-                               parser->ctx->preamble_presented = true;
-                       }
-               }
+               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+                       parser->job->preamble_status |=
+                               AMDGPU_PREAMBLE_IB_PRESENT;
 
-               if (parser->ring && parser->ring != ring)
+               if (parser->entity && parser->entity != entity)
                        return -EINVAL;
 
-               parser->ring = ring;
+               parser->entity = entity;
 
-               r =  amdgpu_ib_get(adev, vm,
-                                       ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
-                                       ib);
+               ring = to_amdgpu_ring(entity->rq->sched);
+               r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
+                                  chunk_ib->ib_bytes : 0, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
@@ -1041,12 +1028,13 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        }
 
        /* UVD & VCE fw doesn't support user fences */
+       ring = to_amdgpu_ring(parser->entity->rq->sched);
        if (parser->job->uf_addr && (
-           parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-           parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+           ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+           ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
 
-       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
+       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1062,24 +1050,23 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                sizeof(struct drm_amdgpu_cs_chunk_dep);
 
        for (i = 0; i < num_deps; ++i) {
-               struct amdgpu_ring *ring;
                struct amdgpu_ctx *ctx;
+               struct drm_sched_entity *entity;
                struct dma_fence *fence;
 
                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;
 
-               r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
-                                        deps[i].ip_type,
-                                        deps[i].ip_instance,
-                                        deps[i].ring, &ring);
+               r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+                                         deps[i].ip_instance,
+                                         deps[i].ring, &entity);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }
 
-               fence = amdgpu_ctx_get_fence(ctx, ring,
+               fence = amdgpu_ctx_get_fence(ctx, entity,
                                             deps[i].handle);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
@@ -1198,49 +1185,45 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       struct amdgpu_ring *ring = p->ring;
-       struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+       struct drm_sched_entity *entity = p->entity;
        enum drm_sched_priority priority;
+       struct amdgpu_ring *ring;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
 
        int r;
 
+       job = p->job;
+       p->job = NULL;
+
+       r = drm_sched_job_init(&job->base, entity, p->filp);
+       if (r)
+               goto error_unlock;
+
+       /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = e->robj;
 
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-                       amdgpu_mn_unlock(p->mn);
-                       return -ERESTARTSYS;
+                       r = -ERESTARTSYS;
+                       goto error_abort;
                }
        }
 
-       job = p->job;
-       p->job = NULL;
-
-       r = drm_sched_job_init(&job->base, entity, p->filp);
-       if (r) {
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
-       r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
-       if (r) {
-               dma_fence_put(p->fence);
-               dma_fence_put(&job->base.s_fence->finished);
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
+       amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
        amdgpu_cs_post_dependencies(p);
 
+       if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+           !p->ctx->preamble_presented) {
+               job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+               p->ctx->preamble_presented = true;
+       }
+
        cs->out.handle = seq;
        job->uf_sequence = seq;
 
@@ -1254,10 +1237,21 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);
 
+       amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
+
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        amdgpu_mn_unlock(p->mn);
 
        return 0;
+
+error_abort:
+       dma_fence_put(&job->base.s_fence->finished);
+       job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+       amdgpu_job_free(job);
+       return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -1304,7 +1298,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        for (i = 0; i < parser.job->num_ibs; i++)
                trace_amdgpu_cs(&parser, i);
 
-       r = amdgpu_cs_ib_vm_chunk(adev, &parser);
+       r = amdgpu_cs_vm_handling(&parser);
        if (r)
                goto out;
 
@@ -1328,9 +1322,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
 {
        union drm_amdgpu_wait_cs *wait = data;
-       struct amdgpu_device *adev = dev->dev_private;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-       struct amdgpu_ring *ring = NULL;
+       struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;
@@ -1339,15 +1332,14 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        if (ctx == NULL)
                return -EINVAL;
 
-       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
-                                wait->in.ip_type, wait->in.ip_instance,
-                                wait->in.ring, &ring);
+       r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
+                                 wait->in.ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }
 
-       fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+       fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
        if (IS_ERR(fence))
                r = PTR_ERR(fence);
        else if (fence) {
@@ -1379,7 +1371,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
 {
-       struct amdgpu_ring *ring;
+       struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;
@@ -1388,14 +1380,14 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);
 
-       r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
-                                user->ip_instance, user->ring, &ring);
+       r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
+                                 user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }
 
-       fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+       fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);
 
        return fence;