if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
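/* Note: only IB chunks carry a va_start that has to be resolved to a
 * backing BO below; every other chunk type is skipped here.
 */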
- va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+ va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
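/* AMDGPU_GMC_HOLE_MASK appears to be the renamed AMDGPU_VA_HOLE_MASK,
 * moved into the GMC code; it still strips the sign-extended "hole"
 * bits from the GPU virtual address.
 */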
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
if (r)
return r;
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
+ p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
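/* amdgpu_gmc_pd_addr() presumably wraps amdgpu_bo_gpu_offset() and
 * applies whatever ASIC-specific encoding the page-directory address
 * needs, instead of using the raw GPU offset directly.
 */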
if (amdgpu_vm_debug) {
	/* Invalidate all BOs to test for userspace bugs */
	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		if (!e->robj)
			continue;
		amdgpu_vm_bo_invalidate(p->adev, e->robj, false);
	}
}
int r;
+ job = p->job;
+ p->job = NULL;
+
+ r = drm_sched_job_init(&job->base, entity, p->filp);
+ if (r)
+ goto error_unlock;
+
+ /* No memory allocation is allowed while holding the mn lock */
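+ /* (Presumably the reason drm_sched_job_init() moved above this lock:
+  * it allocates the scheduler fences, which would violate the rule
+  * stated here.)
+  */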
amdgpu_mn_lock(p->mn);
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
+ r = -ERESTARTSYS;
+ goto error_abort;
}
}
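/* If the MMU notifier invalidated any userptr pages in the meantime,
 * -ERESTARTSYS makes the ioctl restart so the pages can be
 * re-acquired before the job is pushed.
 */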
- job = p->job;
- p->job = NULL;
-
- r = drm_sched_job_init(&job->base, entity, p->filp);
- if (r) {
- amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
- return r;
- }
-
job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
- r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
- if (r) {
- dma_fence_put(p->fence);
- dma_fence_put(&job->base.s_fence->finished);
- amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
- return r;
- }
-
+ amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
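+ /* Note: amdgpu_ctx_add_fence() can seemingly no longer fail, which is
+  * why the error unwinding removed above became dead code; only the
+  * sequence number returned in seq is still needed.
+  */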
amdgpu_cs_post_dependencies(p);
if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
    !p->ctx->preamble_presented) {
	job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
	p->ctx->preamble_presented = true;
}
ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
+ amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
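+ /* Moving the VM's page-directory/table BOs to the LRU tail presumably
+  * makes them the last eviction candidates after this submission.
+  */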
+
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
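/* ttm_eu_fence_buffer_objects() attaches p->fence to every validated
 * BO, so (as I read it) none of them can be evicted before this
 * submission finishes.
 */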
amdgpu_mn_unlock(p->mn);
return 0;
+
+error_abort:
+ dma_fence_put(&job->base.s_fence->finished);
+ job->base.s_fence = NULL;
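+ /* Note: putting the finished fence and clearing s_fence undoes the
+  * earlier drm_sched_job_init() before the job itself is freed below.
+  */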
+ amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+ amdgpu_job_free(job);
+ return r;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)