/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
        struct dma_fence_cb cb;
        unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
        int pasid = -EINVAL;

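        /* Try the widest requested range first; each time the range
         * [1 << (bits - 1), 1 << bits) is exhausted, halve the width so
         * that small PASIDs remain available for callers that need them.
         */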
        for (bits = min(bits, 31U); bits > 0; bits--) {
                pasid = ida_simple_get(&amdgpu_pasid_ida,
                                       1U << (bits - 1), 1U << bits,
                                       GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }

        if (pasid >= 0)
                trace_amdgpu_pasid_allocated(pasid);

        return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
        trace_amdgpu_pasid_freed(pasid);
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
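
/*
 * Example usage (an illustrative sketch, not an in-tree caller): allocate
 * a PASID of up to 16 bits for a new VM and release it again on teardown:
 *
 *      pasid = amdgpu_pasid_alloc(16);
 *      if (pasid < 0)
 *              return pasid;
 *      ...
 *      amdgpu_pasid_free(pasid);
 */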

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *_cb)
{
        struct amdgpu_pasid_cb *cb =
                container_of(_cb, struct amdgpu_pasid_cb, cb);

        amdgpu_pasid_free(cb->pasid);
        dma_fence_put(fence);
        kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
                               unsigned int pasid)
{
        struct dma_fence *fence, **fences;
        struct amdgpu_pasid_cb *cb;
        unsigned count;
        int r;

        r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
        if (r)
                goto fallback;

        if (count == 0) {
                amdgpu_pasid_free(pasid);
                return;
        }

        if (count == 1) {
                fence = fences[0];
                kfree(fences);
        } else {
                uint64_t context = dma_fence_context_alloc(1);
                struct dma_fence_array *array;

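                /* On success the fence array takes ownership of the fences
                 * pointer array and the references it contains; free it here
                 * only when creation fails.
                 */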
                array = dma_fence_array_create(count, fences, context,
                                               1, false);
                if (!array) {
                        kfree(fences);
                        goto fallback;
                }
                fence = &array->base;
        }

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
                amdgpu_pasid_free(pasid);
        } else {
                cb->pasid = pasid;
                if (dma_fence_add_callback(fence, &cb->cb,
                                           amdgpu_pasid_free_cb))
                        amdgpu_pasid_free_cb(fence, &cb->cb);
        }

        return;

fallback:
        /* Not enough memory for the delayed delete, as last resort
         * block for all the fences to complete.
         */
        reservation_object_wait_timeout_rcu(resv, true, false,
                                            MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to the sync
 * object to wait on. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
                                 struct amdgpu_vmid **idle)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence **fences;
        unsigned i;
        int r;

        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
        if (!fences)
                return -ENOMEM;

        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If we can't find an idle VMID to use, wait till one becomes available */
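        /* When the walk above finishes without a break, *idle points at the
         * list head sentinel instead of a real VMID; comparing the list
         * pointers detects that case.
         */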
        if (&(*idle)->list == &id_mgr->ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

                *idle = NULL;
                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

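                /* signal_on_any == true: the array fence signals as soon as
                 * the first VMID becomes idle, so the job only waits for one
                 * ID to free up rather than for all of them.
                 */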
                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        return -ENOMEM;
                }

                r = amdgpu_sync_fence(adev, sync, &array->base, false);
                dma_fence_put(&array->base);
                return r;
        }
        kfree(fences);

        return 0;
}

/* id_mgr->lock must be held */
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
                                            struct amdgpu_ring *ring,
                                            struct amdgpu_sync *sync,
                                            struct dma_fence *fence,
                                            struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence *updates = sync->last_vm_update;
        int r = 0;
        struct dma_fence *flushed, *tmp;
        bool needs_flush = vm->use_cpu_for_update;

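        /* A flush is needed when the ID was last owned by a different VM
         * entity, when the page directory address changed, when there are
         * newer page table updates than were last flushed, or when the
         * previous flush can't be proven to have completed.
         */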
        flushed = id->flushed_updates;
        if ((id->owner != vm->entity.fence_context) ||
            (job->vm_pd_addr != id->pd_gpu_addr) ||
            (updates && (!flushed || updates->context != flushed->context ||
                        dma_fence_is_later(updates, flushed))) ||
            (!id->last_flush || (id->last_flush->context != fence_context &&
                                 !dma_fence_is_signaled(id->last_flush)))) {
                needs_flush = true;
                /* to prevent one context being starved by another context */
                id->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&id->active, ring);
                if (tmp) {
                        r = amdgpu_sync_fence(adev, sync, tmp, false);
                        return r;
                }
        }

        /* Good, we can use this VMID. Remember this submission as
         * user of the VMID.
         */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto out;

        if (updates && (!flushed || updates->context != flushed->context ||
                        dma_fence_is_later(updates, flushed))) {
                dma_fence_put(id->flushed_updates);
                id->flushed_updates = dma_fence_get(updates);
        }
        id->pd_gpu_addr = job->vm_pd_addr;
        id->owner = vm->entity.fence_context;
        job->vm_needs_flush = needs_flush;
        if (needs_flush) {
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);
out:
        return r;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_sync *sync, struct dma_fence *fence,
                     struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        struct amdgpu_vmid *id, *idle;
        int r = 0;

        mutex_lock(&id_mgr->lock);
        r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
        if (r || !idle)
                goto error;

        if (vm->reserved_vmid[vmhub]) {
                r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
                                                     fence, job);
                mutex_unlock(&id_mgr->lock);
                return r;
        }

        job->vm_needs_flush = vm->use_cpu_for_update;
        /* Check if we can use a VMID already assigned to this VM */
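        /* (walk the LRU from the tail so the most recently used, and thus
         * most likely still matching, IDs are tried first)
         */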
        list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
                struct dma_fence *flushed;
                bool needs_flush = vm->use_cpu_for_update;

                /* Check all the prerequisites to using this VMID */
                if (id->owner != vm->entity.fence_context)
                        continue;

                if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;

                if (!id->last_flush ||
                    (id->last_flush->context != fence_context &&
                     !dma_fence_is_signaled(id->last_flush)))
                        needs_flush = true;

                flushed = id->flushed_updates;
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;

                /* Concurrent flushes are only possible starting with Vega10 */
                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
                        continue;

                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
                if (r)
                        goto error;

                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
                        dma_fence_put(id->flushed_updates);
                        id->flushed_updates = dma_fence_get(updates);
                }

                if (needs_flush)
                        goto needs_flush;
                else
                        goto no_flush_needed;
        }

        /* Still no ID to use? Then use the idle one found earlier */
        id = idle;

        /* Remember this submission as user of the VMID */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto error;

        id->pd_gpu_addr = job->vm_pd_addr;
        dma_fence_put(id->flushed_updates);
        id->flushed_updates = dma_fence_get(updates);
        id->owner = vm->entity.fence_context;

needs_flush:
        job->vm_needs_flush = true;
        dma_fence_put(id->last_flush);
        id->last_flush = NULL;

no_flush_needed:
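        /* Move to the LRU tail so this ID counts as most recently used and
         * is found first by the reverse walk above.
         */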
        list_move_tail(&id->list, &id_mgr->ids_lru);

        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);

error:
        mutex_unlock(&id_mgr->lock);
        return r;
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr;
        struct amdgpu_vmid *idle;
        int r = 0;

        id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
            AMDGPU_VM_MAX_RESERVED_VMID) {
                DRM_ERROR("Over limit of reserved vmids\n");
                atomic_dec(&id_mgr->reserved_vmid_num);
                r = -EINVAL;
                goto unlock;
        }
        /* Remove the first (least recently used) VMID from the LRU */
        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
        list_del_init(&idle->list);
        vm->reserved_vmid[vmhub] = idle;
        mutex_unlock(&id_mgr->lock);

        return 0;
unlock:
        mutex_unlock(&id_mgr->lock);
        return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                list_add(&vm->reserved_vmid[vmhub]->list,
                        &id_mgr->ids_lru);
                vm->reserved_vmid[vmhub] = NULL;
                atomic_dec(&id_mgr->reserved_vmid_num);
        }
        mutex_unlock(&id_mgr->lock);
}
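
/*
 * Sketch of the expected pairing (illustrative, not an in-tree caller):
 * a VM that needs a dedicated VMID on a hub reserves it up front and
 * returns it when the VM is torn down:
 *
 *      r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
 *      if (r)
 *              return r;
 *      ...
 *      amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
 */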

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        mutex_lock(&id_mgr->lock);
        id->owner = 0;
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
        mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                for (j = 1; j < id_mgr->num_ids; ++j)
                        amdgpu_vmid_reset(adev, i, j);
        }
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
                atomic_set(&id_mgr->reserved_vmid_num, 0);

                /* skip over VMID 0, since it is the system VM */
                for (j = 1; j < id_mgr->num_ids; ++j) {
                        amdgpu_vmid_reset(adev, i, j);
                        amdgpu_sync_create(&id_mgr->ids[j].active);
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }

        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                }
        }
}