/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	amdgpu_gpu_recover(job->adev, job);
}
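
/*
 * Allocate a job and space for num_ibs IBs in one allocation, and set up
 * the job's sync containers.
 */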
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->dep_sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

	return 0;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
	else
		(*job)->vm_pd_addr = adev->gart.table_addr;

	return r;
}
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
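
/* Scheduler free_job callback: drop the job's fence and sync objects and free it. */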
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}
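
/* Free a job directly, without going through the scheduler's free_job path. */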
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}
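
/*
 * Hand the job over to the GPU scheduler: initialize the scheduler job,
 * give the caller a reference to the finished fence, bump the ring
 * priority and push the job to the entity's queue.
 */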
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	job->ring = ring;
	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	job->fence_ctx = entity->fence_context;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
	amd_sched_entity_push_job(&job->base, entity);

	return 0;
}
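
/*
 * Scheduler dependency callback: return the next fence the job still has to
 * wait for; once the explicit dependencies are drained, grab a VM ID for
 * jobs that run inside a VM.
 */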
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
					       struct amd_sched_entity *s_entity)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
	int r;

	if (amd_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence to sync (%d)\n", r);
	}

	if (!fence)
		fence = amdgpu_sync_get_fence(&job->sync);

	while (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished,
				      job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
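
/*
 * Scheduler run_job callback: submit the job's IBs to the ring. The IBs are
 * skipped when VRAM contents were lost or an error is already set on the
 * finished fence; the resulting hardware fence is stored in the job.
 */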
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;
	adev = job->adev;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}
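
/* Callbacks hooked into the GPU scheduler for amdgpu jobs. */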
const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};