/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"

/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_userptr_mem_limit;
	int64_t system_mem_used;
	int64_t userptr_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t        domain;
	bool            wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
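/* Check whether a BO still needs a bo_va entry for the given VM, i.e.
 * whether no entry in mem->bo_va_list already points at avm.
 */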
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}
/* Set memory usage limits. Currently, limits are
 *  System (kernel) memory - 3/8th System RAM
 *  Userptr memory - 3/4th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_userptr_mem_limit >> 20));
}
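/* Charge an allocation against the KFD memory limits. GTT BOs count
 * the BO size plus the TTM accounting size against the system memory
 * limit; CPU-domain (userptr) BOs count the accounting size against
 * system memory and the BO size against the userptr limit. The
 * reservation is refused if either limit would be exceeded.
 */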
113 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
114 uint64_t size, u32 domain)
119 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
120 sizeof(struct amdgpu_bo));
122 spin_lock(&kfd_mem_limit.mem_limit_lock);
123 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
124 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
125 kfd_mem_limit.max_system_mem_limit) {
129 kfd_mem_limit.system_mem_used += (acc_size + size);
130 } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
131 if ((kfd_mem_limit.system_mem_used + acc_size >
132 kfd_mem_limit.max_system_mem_limit) ||
133 (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
134 kfd_mem_limit.max_userptr_mem_limit)) {
138 kfd_mem_limit.system_mem_used += acc_size;
139 kfd_mem_limit.userptr_mem_used += size;
142 spin_unlock(&kfd_mem_limit.mem_limit_lock);
146 static void unreserve_system_mem_limit(struct amdgpu_device *adev,
147 uint64_t size, u32 domain)
151 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
152 sizeof(struct amdgpu_bo));
154 spin_lock(&kfd_mem_limit.mem_limit_lock);
155 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
156 kfd_mem_limit.system_mem_used -= (acc_size + size);
157 } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
158 kfd_mem_limit.system_mem_used -= acc_size;
159 kfd_mem_limit.userptr_mem_used -= size;
161 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
162 "kfd system memory accounting unbalanced");
163 WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
164 "kfd userptr memory accounting unbalanced");
166 spin_unlock(&kfd_mem_limit.mem_limit_lock);
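/* Undo the memory-limit accounting when a KFD BO is released. Userptr
 * BOs are identified by the AMDGPU_AMDKFD_USERPTR_BO flag, GTT BOs by
 * their preferred domain.
 */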
169 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
171 spin_lock(&kfd_mem_limit.mem_limit_lock);
173 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
174 kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
175 kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
176 } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
177 kfd_mem_limit.system_mem_used -=
178 (bo->tbo.acc_size + amdgpu_bo_size(bo));
180 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
181 "kfd system memory accounting unbalanced");
182 WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
183 "kfd userptr memory accounting unbalanced");
185 spin_unlock(&kfd_mem_limit.mem_limit_lock);
189 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
190 * reservation object.
192 * @bo: [IN] Remove eviction fence(s) from this BO
193 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
194 * is present in the shared list.
195 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
196 * from BO's reservation object shared list.
197 * @ef_count: [OUT] Number of fences in ef_list.
199 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
200 * called to restore the eviction fences and to avoid memory leak. This is
201 * useful for shared BOs.
202 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
204 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
205 struct amdgpu_amdkfd_fence *ef,
206 struct amdgpu_amdkfd_fence ***ef_list,
207 unsigned int *ef_count)
209 struct reservation_object *resv = bo->tbo.resv;
210 struct reservation_object_list *old, *new;
211 unsigned int i, j, k;
221 old = reservation_object_get_list(resv);
225 new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
/* Go through all the shared fences in the reservation object and sort
 * the interesting ones to the end of the list.
 */
233 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
236 f = rcu_dereference_protected(old->shared[i],
237 reservation_object_held(resv));
239 if ((ef && f->context == ef->base.context) ||
240 (!ef && to_amdgpu_amdkfd_fence(f)))
241 RCU_INIT_POINTER(new->shared[--j], f);
243 RCU_INIT_POINTER(new->shared[k++], f);
245 new->shared_max = old->shared_max;
246 new->shared_count = k;
249 unsigned int count = old->shared_count - j;
251 /* Alloc memory for count number of eviction fence pointers.
252 * Fill the ef_list array and ef_count
254 *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
263 /* Install the new fence list, seqcount provides the barriers */
265 write_seqcount_begin(&resv->seq);
266 RCU_INIT_POINTER(resv->fence, new);
267 write_seqcount_end(&resv->seq);
270 /* Drop the references to the removed fences or move them to ef_list */
271 for (i = j, k = 0; i < old->shared_count; ++i) {
274 f = rcu_dereference_protected(new->shared[i],
275 reservation_object_held(resv));
277 (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
286 /* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
287 * reservation object.
289 * @bo: [IN] Add eviction fences to this BO
290 * @ef_list: [IN] List of eviction fences to be added
291 * @ef_count: [IN] Number of fences in ef_list.
293 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
296 static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
297 struct amdgpu_amdkfd_fence **ef_list,
298 unsigned int ef_count)
302 if (!ef_list || !ef_count)
305 for (i = 0; i < ef_count; i++) {
306 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
307 /* Re-adding the fence takes an additional reference. Drop that
310 dma_fence_put(&ef_list[i]->base);
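/* Validate a (non-userptr) BO in the given domain. When waiting for
 * the validation to complete, the KFD eviction fences are removed from
 * the BO around the wait so that the wait itself does not trigger an
 * eviction, and are added back afterwards.
 */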
316 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
319 struct ttm_operation_ctx ctx = { false, false };
322 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
323 "Called with userptr BO"))
326 amdgpu_bo_placement_from_domain(bo, domain);
328 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
332 struct amdgpu_amdkfd_fence **ef_list;
333 unsigned int ef_count;
335 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
340 ttm_bo_wait(&bo->tbo, false, false);
341 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
348 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
350 struct amdgpu_vm_parser *p = param;
352 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
355 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
357 * Page directories are not updated here because huge page handling
358 * during page table updates can invalidate page directory entries
359 * again. Page directories are only updated after updating page
362 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
364 struct amdgpu_bo *pd = vm->root.base.bo;
365 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
366 struct amdgpu_vm_parser param;
369 param.domain = AMDGPU_GEM_DOMAIN_VRAM;
372 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
375 pr_err("amdgpu: failed to validate PT BOs\n");
ret = amdgpu_amdkfd_validate(&param, pd);
381 pr_err("amdgpu: failed to validate PD\n");
385 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
387 if (vm->use_cpu_for_update) {
388 ret = amdgpu_bo_kmap(pd, NULL);
390 pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
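/* Add a VM update fence to the sync object so callers can wait for
 * page table updates to complete.
 */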
398 static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
401 int ret = amdgpu_sync_fence(adev, sync, f, false);
403 /* Sync objects can't handle multiple GPUs (contexts) updating
404 * sync->last_vm_update. Fortunately we don't need it for
405 * KFD's purposes, so we can just drop that fence.
407 if (sync->last_vm_update) {
408 dma_fence_put(sync->last_vm_update);
409 sync->last_vm_update = NULL;
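/* Update the page directories of a VM and add the resulting fence to
 * the sync object.
 */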
415 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
417 struct amdgpu_bo *pd = vm->root.base.bo;
418 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
421 ret = amdgpu_vm_update_directories(adev, vm);
425 return sync_vm_fence(adev, sync, vm->last_update);
428 /* add_bo_to_vm - Add a BO to a VM
 * Everything that needs to be done only once when a BO is first added
431 * to a VM. It can later be mapped and unmapped many times without
432 * repeating these steps.
434 * 1. Allocate and initialize BO VA entry data structure
435 * 2. Add BO to the VM
436 * 3. Determine ASIC-specific PTE flags
437 * 4. Alloc page tables and directories if needed
438 * 4a. Validate new page tables and directories
440 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
441 struct amdgpu_vm *vm, bool is_aql,
442 struct kfd_bo_va_list **p_bo_va_entry)
445 struct kfd_bo_va_list *bo_va_entry;
446 struct amdgpu_bo *pd = vm->root.base.bo;
447 struct amdgpu_bo *bo = mem->bo;
448 uint64_t va = mem->va;
449 struct list_head *list_bo_va = &mem->bo_va_list;
450 unsigned long bo_size = bo->tbo.mem.size;
453 pr_err("Invalid VA when adding BO to VM\n");
460 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
464 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
467 /* Add BO to VM internal data structures*/
468 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
469 if (!bo_va_entry->bo_va) {
471 pr_err("Failed to add BO object to VM. ret == %d\n",
476 bo_va_entry->va = va;
477 bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
479 bo_va_entry->kgd_dev = (void *)adev;
480 list_add(&bo_va_entry->bo_list, list_bo_va);
483 *p_bo_va_entry = bo_va_entry;
485 /* Allocate new page tables if needed and validate
486 * them. Clearing of new page tables and validate need to wait
487 * on move fences. We don't want that to trigger the eviction
488 * fence, so remove it temporarily.
490 amdgpu_amdkfd_remove_eviction_fence(pd,
491 vm->process_info->eviction_fence,
494 ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
496 pr_err("Failed to allocate pts, err=%d\n", ret);
500 ret = vm_validate_pt_pd_bos(vm);
502 pr_err("validate_pt_pd_bos() failed\n");
506 /* Add the eviction fence back */
507 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
512 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
513 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
514 list_del(&bo_va_entry->bo_list);
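/* Undo add_bo_to_vm: remove the VM mapping again and drop the
 * kfd_bo_va_list entry.
 */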
520 static void remove_bo_from_vm(struct amdgpu_device *adev,
521 struct kfd_bo_va_list *entry, unsigned long size)
523 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
525 entry->va + size, entry);
526 amdgpu_vm_bo_rmv(adev, entry->bo_va);
527 list_del(&entry->bo_list);
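/* Add the BO's validate_list entry to the per-process KFD BO list
 * (userptr_valid_list for userptr BOs, kfd_bo_list otherwise) so the
 * eviction and restore workers can find it.
 */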
531 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
532 struct amdkfd_process_info *process_info,
535 struct ttm_validate_buffer *entry = &mem->validate_list;
536 struct amdgpu_bo *bo = mem->bo;
538 INIT_LIST_HEAD(&entry->head);
539 entry->shared = true;
540 entry->bo = &bo->tbo;
541 mutex_lock(&process_info->lock);
543 list_add_tail(&entry->head, &process_info->userptr_valid_list);
545 list_add_tail(&entry->head, &process_info->kfd_bo_list);
546 mutex_unlock(&process_info->lock);
549 /* Initializes user pages. It registers the MMU notifier and validates
550 * the userptr BO in the GTT domain.
552 * The BO must already be on the userptr_valid_list. Otherwise an
553 * eviction and restore may happen that leaves the new BO unmapped
554 * with the user mode queues running.
556 * Takes the process_info->lock to protect against concurrent restore
559 * Returns 0 for success, negative errno for errors.
561 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
564 struct amdkfd_process_info *process_info = mem->process_info;
565 struct amdgpu_bo *bo = mem->bo;
566 struct ttm_operation_ctx ctx = { true, false };
569 mutex_lock(&process_info->lock);
571 ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
573 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
577 ret = amdgpu_mn_register(bo, user_addr);
579 pr_err("%s: Failed to register MMU notifier: %d\n",
584 /* If no restore worker is running concurrently, user_pages
585 * should not be allocated
587 WARN(mem->user_pages, "Leaking user_pages array");
589 mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
590 sizeof(struct page *),
591 GFP_KERNEL | __GFP_ZERO);
592 if (!mem->user_pages) {
593 pr_err("%s: Failed to allocate pages array\n", __func__);
598 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
600 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
604 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
606 ret = amdgpu_bo_reserve(bo, true);
608 pr_err("%s: Failed to reserve BO\n", __func__);
611 amdgpu_bo_placement_from_domain(bo, mem->domain);
612 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
614 pr_err("%s: failed to validate BO\n", __func__);
615 amdgpu_bo_unreserve(bo);
619 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
621 kvfree(mem->user_pages);
622 mem->user_pages = NULL;
625 amdgpu_mn_unregister(bo);
627 mutex_unlock(&process_info->lock);
631 /* Reserving a BO and its page table BOs must happen atomically to
632 * avoid deadlocks. Some operations update multiple VMs at once. Track
633 * all the reservation info in a context structure. Optionally a sync
634 * object can track VM updates.
636 struct bo_vm_reservation_context {
637 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
638 unsigned int n_vms; /* Number of VMs reserved */
639 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
640 struct ww_acquire_ctx ticket; /* Reservation ticket */
641 struct list_head list, duplicates; /* BO lists */
642 struct amdgpu_sync *sync; /* Pointer to sync object */
643 bool reserved; /* Whether BOs are reserved */
647 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
648 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
649 BO_VM_ALL, /* Match all VMs a BO was added to */
653 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
654 * @mem: KFD BO structure.
655 * @vm: the VM to reserve.
656 * @ctx: the struct that will be used in unreserve_bo_and_vms().
658 static int reserve_bo_and_vm(struct kgd_mem *mem,
659 struct amdgpu_vm *vm,
660 struct bo_vm_reservation_context *ctx)
662 struct amdgpu_bo *bo = mem->bo;
667 ctx->reserved = false;
669 ctx->sync = &mem->sync;
671 INIT_LIST_HEAD(&ctx->list);
672 INIT_LIST_HEAD(&ctx->duplicates);
674 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
678 ctx->kfd_bo.priority = 0;
679 ctx->kfd_bo.tv.bo = &bo->tbo;
680 ctx->kfd_bo.tv.shared = true;
681 ctx->kfd_bo.user_pages = NULL;
682 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
684 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
686 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
687 false, &ctx->duplicates);
689 ctx->reserved = true;
691 pr_err("Failed to reserve buffers in ttm\n");
700 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
701 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are reserved. Otherwise, only the given VM is reserved.
704 * @map_type: the mapping status that will be used to filter the VMs.
705 * @ctx: the struct that will be used in unreserve_bo_and_vms().
707 * Returns 0 for success, negative for failure.
709 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
710 struct amdgpu_vm *vm, enum bo_vm_match map_type,
711 struct bo_vm_reservation_context *ctx)
713 struct amdgpu_bo *bo = mem->bo;
714 struct kfd_bo_va_list *entry;
718 ctx->reserved = false;
721 ctx->sync = &mem->sync;
723 INIT_LIST_HEAD(&ctx->list);
724 INIT_LIST_HEAD(&ctx->duplicates);
726 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
727 if ((vm && vm != entry->bo_va->base.vm) ||
728 (entry->is_mapped != map_type
729 && map_type != BO_VM_ALL))
735 if (ctx->n_vms != 0) {
736 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
742 ctx->kfd_bo.priority = 0;
743 ctx->kfd_bo.tv.bo = &bo->tbo;
744 ctx->kfd_bo.tv.shared = true;
745 ctx->kfd_bo.user_pages = NULL;
746 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
749 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
750 if ((vm && vm != entry->bo_va->base.vm) ||
751 (entry->is_mapped != map_type
752 && map_type != BO_VM_ALL))
755 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
760 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
761 false, &ctx->duplicates);
763 ctx->reserved = true;
765 pr_err("Failed to reserve buffers in ttm.\n");
776 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
777 * @ctx: Reservation context to unreserve
778 * @wait: Optionally wait for a sync object representing pending VM updates
779 * @intr: Whether the wait is interruptible
781 * Also frees any resources allocated in
782 * reserve_bo_and_(cond_)vm(s). Returns the status from
785 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
786 bool wait, bool intr)
791 ret = amdgpu_sync_wait(ctx->sync, intr);
794 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
799 ctx->reserved = false;
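/* Remove the GPUVM mapping of one kfd_bo_va_list entry and add the
 * resulting page table update fence to the sync object.
 */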
805 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
806 struct kfd_bo_va_list *entry,
807 struct amdgpu_sync *sync)
809 struct amdgpu_bo_va *bo_va = entry->bo_va;
810 struct amdgpu_vm *vm = bo_va->base.vm;
811 struct amdgpu_bo *pd = vm->root.base.bo;
813 /* Remove eviction fence from PD (and thereby from PTs too as
814 * they share the resv. object). Otherwise during PT update
815 * job (see amdgpu_vm_bo_update_mapping), eviction fence would
816 * get added to job->sync object and job execution would
817 * trigger the eviction fence.
819 amdgpu_amdkfd_remove_eviction_fence(pd,
820 vm->process_info->eviction_fence,
822 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
824 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
826 /* Add the eviction fence back */
827 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
829 sync_vm_fence(adev, sync, bo_va->last_pt_update);
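/* Update the page table entries of an existing mapping and add the
 * update fence to the sync object.
 */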
834 static int update_gpuvm_pte(struct amdgpu_device *adev,
835 struct kfd_bo_va_list *entry,
836 struct amdgpu_sync *sync)
839 struct amdgpu_vm *vm;
840 struct amdgpu_bo_va *bo_va;
841 struct amdgpu_bo *bo;
843 bo_va = entry->bo_va;
847 /* Update the page tables */
848 ret = amdgpu_vm_bo_update(adev, bo_va, false);
850 pr_err("amdgpu_vm_bo_update failed\n");
854 return sync_vm_fence(adev, sync, bo_va->last_pt_update);
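/* Create the VM mapping for one kfd_bo_va_list entry. The caller can
 * ask to defer the page table update, e.g. for invalid userptr BOs
 * that will be mapped later by the restore worker.
 */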
857 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
858 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
863 /* Set virtual address for the allocation */
864 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
865 amdgpu_bo_size(entry->bo_va->base.bo),
868 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
876 ret = update_gpuvm_pte(adev, entry, sync);
878 pr_err("update_gpuvm_pte() failed\n");
879 goto update_gpuvm_pte_failed;
884 update_gpuvm_pte_failed:
885 unmap_bo_from_gpuvm(adev, entry, sync);
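/* Validate the page table and directory BOs of all VMs belonging to
 * the KFD process.
 */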
889 static int process_validate_vms(struct amdkfd_process_info *process_info)
891 struct amdgpu_vm *peer_vm;
894 list_for_each_entry(peer_vm, &process_info->vm_list_head,
896 ret = vm_validate_pt_pd_bos(peer_vm);
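/* Update the page directories of all VMs belonging to the KFD process
 * and collect the update fences in the sync object.
 */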
904 static int process_update_pds(struct amdkfd_process_info *process_info,
905 struct amdgpu_sync *sync)
907 struct amdgpu_vm *peer_vm;
910 list_for_each_entry(peer_vm, &process_info->vm_list_head,
912 ret = vm_update_pds(peer_vm, sync);
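/* Initialize the KFD part of a VM: create the shared process info and
 * eviction fence on first use, validate the page directory and attach
 * the eviction fence to it, then add the VM to the process info.
 */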
920 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
921 struct dma_fence **ef)
923 struct amdkfd_process_info *info = NULL;
926 if (!*process_info) {
927 info = kzalloc(sizeof(*info), GFP_KERNEL);
931 mutex_init(&info->lock);
932 INIT_LIST_HEAD(&info->vm_list_head);
933 INIT_LIST_HEAD(&info->kfd_bo_list);
934 INIT_LIST_HEAD(&info->userptr_valid_list);
935 INIT_LIST_HEAD(&info->userptr_inval_list);
937 info->eviction_fence =
938 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
940 if (!info->eviction_fence) {
941 pr_err("Failed to create eviction fence\n");
943 goto create_evict_fence_fail;
946 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
947 atomic_set(&info->evicted_bos, 0);
948 INIT_DELAYED_WORK(&info->restore_userptr_work,
949 amdgpu_amdkfd_restore_userptr_worker);
951 *process_info = info;
952 *ef = dma_fence_get(&info->eviction_fence->base);
955 vm->process_info = *process_info;
957 /* Validate page directory and attach eviction fence */
958 ret = amdgpu_bo_reserve(vm->root.base.bo, true);
960 goto reserve_pd_fail;
961 ret = vm_validate_pt_pd_bos(vm);
963 pr_err("validate_pt_pd_bos() failed\n");
964 goto validate_pd_fail;
966 ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
969 amdgpu_bo_fence(vm->root.base.bo,
970 &vm->process_info->eviction_fence->base, true);
971 amdgpu_bo_unreserve(vm->root.base.bo);
973 /* Update process info */
974 mutex_lock(&vm->process_info->lock);
975 list_add_tail(&vm->vm_list_node,
976 &(vm->process_info->vm_list_head));
977 vm->process_info->n_vms++;
978 mutex_unlock(&vm->process_info->lock);
984 amdgpu_bo_unreserve(vm->root.base.bo);
986 vm->process_info = NULL;
988 /* Two fence references: one in info and one in *ef */
989 dma_fence_put(&info->eviction_fence->base);
992 *process_info = NULL;
994 create_evict_fence_fail:
995 mutex_destroy(&info->lock);
1001 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
1002 void **vm, void **process_info,
1003 struct dma_fence **ef)
1005 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1006 struct amdgpu_vm *new_vm;
1009 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1013 /* Initialize AMDGPU part of the VM */
1014 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1016 pr_err("Failed init vm ret %d\n", ret);
1017 goto amdgpu_vm_init_fail;
1020 /* Initialize KFD part of the VM and process info */
1021 ret = init_kfd_vm(new_vm, process_info, ef);
1023 goto init_kfd_vm_fail;
1025 *vm = (void *) new_vm;
1030 amdgpu_vm_fini(adev, new_vm);
1031 amdgpu_vm_init_fail:
1036 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1037 struct file *filp, unsigned int pasid,
1038 void **vm, void **process_info,
1039 struct dma_fence **ef)
1041 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1042 struct drm_file *drm_priv = filp->private_data;
1043 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1044 struct amdgpu_vm *avm = &drv_priv->vm;
1047 /* Already a compute VM? */
1048 if (avm->process_info)
1051 /* Convert VM into a compute VM */
1052 ret = amdgpu_vm_make_compute(adev, avm, pasid);
1056 /* Initialize KFD part of the VM and process info */
1057 ret = init_kfd_vm(avm, process_info, ef);
1066 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1067 struct amdgpu_vm *vm)
1069 struct amdkfd_process_info *process_info = vm->process_info;
1070 struct amdgpu_bo *pd = vm->root.base.bo;
1075 /* Release eviction fence from PD */
1076 amdgpu_bo_reserve(pd, false);
1077 amdgpu_bo_fence(pd, NULL, false);
1078 amdgpu_bo_unreserve(pd);
1080 /* Update process info */
1081 mutex_lock(&process_info->lock);
1082 process_info->n_vms--;
1083 list_del(&vm->vm_list_node);
1084 mutex_unlock(&process_info->lock);
1086 /* Release per-process resources when last compute VM is destroyed */
1087 if (!process_info->n_vms) {
1088 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1089 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1090 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1092 dma_fence_put(&process_info->eviction_fence->base);
1093 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1094 put_pid(process_info->pid);
1095 mutex_destroy(&process_info->lock);
1096 kfree(process_info);
1100 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1102 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1103 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1105 if (WARN_ON(!kgd || !vm))
1108 pr_debug("Destroying process vm %p\n", vm);
1110 /* Release the VM context */
1111 amdgpu_vm_fini(adev, avm);
1115 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1117 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1118 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1120 if (WARN_ON(!kgd || !vm))
1123 pr_debug("Releasing process vm %p\n", vm);
/* The original pasid of the amdgpu vm has already been
 * released when the vm was converted to a compute vm.
 * The current pasid is managed by kfd and will be
 * released on kfd process destroy. Set the amdgpu pasid
 * to 0 to avoid a duplicate release.
 */
1131 amdgpu_vm_release_compute(adev, avm);
1134 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1136 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1137 struct amdgpu_bo *pd = avm->root.base.bo;
1138 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1140 if (adev->asic_type < CHIP_VEGA10)
1141 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1142 return avm->pd_phys_addr;
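/* Allocate a KFD buffer object: choose the placement from the
 * allocation flags, reserve against the KFD memory limits, create the
 * amdgpu BO, and for userptr allocations register the MMU notifier and
 * get the user pages.
 */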
1145 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1146 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1147 void *vm, struct kgd_mem **mem,
1148 uint64_t *offset, uint32_t flags)
1150 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1151 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1152 uint64_t user_addr = 0;
1153 struct amdgpu_bo *bo;
1154 struct amdgpu_bo_param bp;
1156 u32 domain, alloc_domain;
1158 uint32_t mapping_flags;
1162 * Check on which domain to allocate BO
1164 if (flags & ALLOC_MEM_FLAGS_VRAM) {
1165 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1166 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1167 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1168 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1169 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1170 } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1171 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1173 } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1174 domain = AMDGPU_GEM_DOMAIN_GTT;
1175 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1177 if (!offset || !*offset)
1179 user_addr = *offset;
1184 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1187 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1188 mutex_init(&(*mem)->lock);
1189 (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
 * memory twice. That means we only actually allocate half
 * the size.
 */
if ((*mem)->aql_queue)
	size = size >> 1;
1198 /* Workaround for TLB bug on older VI chips */
1199 byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1200 adev->asic_type != CHIP_FIJI &&
1201 adev->asic_type != CHIP_POLARIS10 &&
1202 adev->asic_type != CHIP_POLARIS11) ?
1203 VI_BO_SIZE_ALIGN : 1;
1205 mapping_flags = AMDGPU_VM_PAGE_READABLE;
1206 if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1207 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1208 if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1209 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1210 if (flags & ALLOC_MEM_FLAGS_COHERENT)
1211 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1213 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1214 (*mem)->mapping_flags = mapping_flags;
1216 amdgpu_sync_create(&(*mem)->sync);
1218 ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
1220 pr_debug("Insufficient system memory\n");
1221 goto err_reserve_system_mem;
1224 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1225 va, size, domain_string(alloc_domain));
1227 memset(&bp, 0, sizeof(bp));
1229 bp.byte_align = byte_align;
1230 bp.domain = alloc_domain;
1231 bp.flags = alloc_flags;
1232 bp.type = ttm_bo_type_device;
1234 ret = amdgpu_bo_create(adev, &bp, &bo);
1236 pr_debug("Failed to create BO on domain %s. ret %d\n",
1237 domain_string(alloc_domain), ret);
1243 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1246 (*mem)->domain = domain;
1247 (*mem)->mapped_to_gpu_memory = 0;
1248 (*mem)->process_info = avm->process_info;
1249 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1252 ret = init_user_pages(*mem, current->mm, user_addr);
1254 mutex_lock(&avm->process_info->lock);
1255 list_del(&(*mem)->validate_list.head);
1256 mutex_unlock(&avm->process_info->lock);
1257 goto allocate_init_user_pages_failed;
1262 *offset = amdgpu_bo_mmap_offset(bo);
1266 allocate_init_user_pages_failed:
1267 amdgpu_bo_unref(&bo);
1268 /* Don't unreserve system mem limit twice */
1269 goto err_reserve_system_mem;
1271 unreserve_system_mem_limit(adev, size, alloc_domain);
1272 err_reserve_system_mem:
1273 mutex_destroy(&(*mem)->lock);
1278 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1279 struct kgd_dev *kgd, struct kgd_mem *mem)
1281 struct amdkfd_process_info *process_info = mem->process_info;
1282 unsigned long bo_size = mem->bo->tbo.mem.size;
1283 struct kfd_bo_va_list *entry, *tmp;
1284 struct bo_vm_reservation_context ctx;
1285 struct ttm_validate_buffer *bo_list_entry;
1288 mutex_lock(&mem->lock);
1290 if (mem->mapped_to_gpu_memory > 0) {
1291 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1293 mutex_unlock(&mem->lock);
1297 mutex_unlock(&mem->lock);
1298 /* lock is not needed after this, since mem is unused and will
1302 /* No more MMU notifiers */
1303 amdgpu_mn_unregister(mem->bo);
1305 /* Make sure restore workers don't access the BO any more */
1306 bo_list_entry = &mem->validate_list;
1307 mutex_lock(&process_info->lock);
1308 list_del(&bo_list_entry->head);
1309 mutex_unlock(&process_info->lock);
1311 /* Free user pages if necessary */
1312 if (mem->user_pages) {
1313 pr_debug("%s: Freeing user_pages array\n", __func__);
1314 if (mem->user_pages[0])
1315 release_pages(mem->user_pages,
1316 mem->bo->tbo.ttm->num_pages);
1317 kvfree(mem->user_pages);
1320 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1324 /* The eviction fence should be removed by the last unmap.
1325 * TODO: Log an error condition if the bo still has the eviction fence
1328 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1329 process_info->eviction_fence,
1331 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1332 mem->va + bo_size * (1 + mem->aql_queue));
1334 /* Remove from VM internal data structures */
1335 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1336 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1339 ret = unreserve_bo_and_vms(&ctx, false, false);
1341 /* Free the sync object */
1342 amdgpu_sync_free(&mem->sync);
1345 amdgpu_bo_unref(&mem->bo);
1346 mutex_destroy(&mem->lock);
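/* Map a KFD BO into a GPU VM: add it to the VM on first use, validate
 * it, write the page table entries and attach the eviction fence so
 * the BO cannot be evicted while it is mapped.
 */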
1352 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1353 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1355 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1356 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1358 struct amdgpu_bo *bo;
1360 struct kfd_bo_va_list *entry;
1361 struct bo_vm_reservation_context ctx;
1362 struct kfd_bo_va_list *bo_va_entry = NULL;
1363 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1364 unsigned long bo_size;
1365 bool is_invalid_userptr = false;
1369 pr_err("Invalid BO when mapping memory to GPU\n");
1373 /* Make sure restore is not running concurrently. Since we
1374 * don't map invalid userptr BOs, we rely on the next restore
1375 * worker to do the mapping
1377 mutex_lock(&mem->process_info->lock);
1379 /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1380 * sure that the MMU notifier is no longer running
1381 * concurrently and the queues are actually stopped
1383 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
down_write(&current->mm->mmap_sem);
is_invalid_userptr = atomic_read(&mem->invalid);
up_write(&current->mm->mmap_sem);
1389 mutex_lock(&mem->lock);
1391 domain = mem->domain;
1392 bo_size = bo->tbo.mem.size;
1394 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1396 mem->va + bo_size * (1 + mem->aql_queue),
1397 vm, domain_string(domain));
1399 ret = reserve_bo_and_vm(mem, vm, &ctx);
1403 /* Userptr can be marked as "not invalid", but not actually be
1404 * validated yet (still in the system domain). In that case
1405 * the queues are still stopped and we can leave mapping for
1406 * the next restore worker
1408 if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1409 is_invalid_userptr = true;
1411 if (check_if_add_bo_to_vm(avm, mem)) {
1412 ret = add_bo_to_vm(adev, mem, avm, false,
1415 goto add_bo_to_vm_failed;
1416 if (mem->aql_queue) {
1417 ret = add_bo_to_vm(adev, mem, avm,
1418 true, &bo_va_entry_aql);
1420 goto add_bo_to_vm_failed_aql;
1423 ret = vm_validate_pt_pd_bos(avm);
1425 goto add_bo_to_vm_failed;
1428 if (mem->mapped_to_gpu_memory == 0 &&
1429 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1430 /* Validate BO only once. The eviction fence gets added to BO
1431 * the first time it is mapped. Validate will wait for all
1432 * background evictions to complete.
1434 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1436 pr_debug("Validate failed\n");
1437 goto map_bo_to_gpuvm_failed;
1441 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1442 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1443 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1444 entry->va, entry->va + bo_size,
1447 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1448 is_invalid_userptr);
pr_err("Failed to map bo to gpuvm\n");
1451 goto map_bo_to_gpuvm_failed;
1454 ret = vm_update_pds(vm, ctx.sync);
1456 pr_err("Failed to update page directories\n");
1457 goto map_bo_to_gpuvm_failed;
1460 entry->is_mapped = true;
1461 mem->mapped_to_gpu_memory++;
1462 pr_debug("\t INC mapping count %d\n",
1463 mem->mapped_to_gpu_memory);
1467 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1469 &avm->process_info->eviction_fence->base,
1471 ret = unreserve_bo_and_vms(&ctx, false, false);
1475 map_bo_to_gpuvm_failed:
1476 if (bo_va_entry_aql)
1477 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1478 add_bo_to_vm_failed_aql:
1480 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1481 add_bo_to_vm_failed:
1482 unreserve_bo_and_vms(&ctx, false, false);
1484 mutex_unlock(&mem->process_info->lock);
1485 mutex_unlock(&mem->lock);
1489 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1490 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1492 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1493 struct amdkfd_process_info *process_info =
1494 ((struct amdgpu_vm *)vm)->process_info;
1495 unsigned long bo_size = mem->bo->tbo.mem.size;
1496 struct kfd_bo_va_list *entry;
1497 struct bo_vm_reservation_context ctx;
1500 mutex_lock(&mem->lock);
1502 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1505 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1506 if (ctx.n_vms == 0) {
1511 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1515 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1517 mem->va + bo_size * (1 + mem->aql_queue),
1520 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1521 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1522 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1524 entry->va + bo_size,
1527 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1529 entry->is_mapped = false;
1531 pr_err("failed to unmap VA 0x%llx\n",
1536 mem->mapped_to_gpu_memory--;
1537 pr_debug("\t DEC mapping count %d\n",
1538 mem->mapped_to_gpu_memory);
1542 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1545 if (mem->mapped_to_gpu_memory == 0 &&
1546 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1547 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1548 process_info->eviction_fence,
1552 unreserve_bo_and_vms(&ctx, false, false);
1554 mutex_unlock(&mem->lock);
1558 int amdgpu_amdkfd_gpuvm_sync_memory(
1559 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1561 struct amdgpu_sync sync;
1564 amdgpu_sync_create(&sync);
1566 mutex_lock(&mem->lock);
1567 amdgpu_sync_clone(&mem->sync, &sync);
1568 mutex_unlock(&mem->lock);
1570 ret = amdgpu_sync_wait(&sync, intr);
1571 amdgpu_sync_free(&sync);
1575 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1576 struct kgd_mem *mem, void **kptr, uint64_t *size)
1579 struct amdgpu_bo *bo = mem->bo;
1581 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1582 pr_err("userptr can't be mapped to kernel\n");
1586 /* delete kgd_mem from kfd_bo_list to avoid re-validating
1587 * this BO in BO's restoring after eviction.
1589 mutex_lock(&mem->process_info->lock);
1591 ret = amdgpu_bo_reserve(bo, true);
1593 pr_err("Failed to reserve bo. ret %d\n", ret);
1594 goto bo_reserve_failed;
1597 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1599 pr_err("Failed to pin bo. ret %d\n", ret);
1603 ret = amdgpu_bo_kmap(bo, kptr);
1605 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1609 amdgpu_amdkfd_remove_eviction_fence(
1610 bo, mem->process_info->eviction_fence, NULL, NULL);
1611 list_del_init(&mem->validate_list.head);
1614 *size = amdgpu_bo_size(bo);
1616 amdgpu_bo_unreserve(bo);
1618 mutex_unlock(&mem->process_info->lock);
1622 amdgpu_bo_unpin(bo);
1624 amdgpu_bo_unreserve(bo);
1626 mutex_unlock(&mem->process_info->lock);
1631 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1632 struct kfd_vm_fault_info *mem)
1634 struct amdgpu_device *adev;
1636 adev = (struct amdgpu_device *)kgd;
1637 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1638 *mem = *adev->gmc.vm_fault_info;
1640 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1645 /* Evict a userptr BO by stopping the queues if necessary
1647 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1648 * cannot do any memory allocations, and cannot take any locks that
1649 * are held elsewhere while allocating memory. Therefore this is as
1650 * simple as possible, using atomic counters.
1652 * It doesn't do anything to the BO itself. The real work happens in
1653 * restore, where we get updated page addresses. This function only
1654 * ensures that GPU access to the BO is stopped.
1656 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1657 struct mm_struct *mm)
1659 struct amdkfd_process_info *process_info = mem->process_info;
1660 int invalid, evicted_bos;
1663 invalid = atomic_inc_return(&mem->invalid);
1664 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1665 if (evicted_bos == 1) {
1666 /* First eviction, stop the queues */
1667 r = kgd2kfd->quiesce_mm(mm);
1669 pr_err("Failed to quiesce KFD\n");
1670 schedule_delayed_work(&process_info->restore_userptr_work,
1671 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1677 /* Update invalid userptr BOs
1679 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1680 * userptr_inval_list and updates user pages for all BOs that have
1681 * been invalidated since their last update.
1683 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1684 struct mm_struct *mm)
1686 struct kgd_mem *mem, *tmp_mem;
1687 struct amdgpu_bo *bo;
1688 struct ttm_operation_ctx ctx = { false, false };
1691 /* Move all invalidated BOs to the userptr_inval_list and
1692 * release their user pages by migration to the CPU domain
1694 list_for_each_entry_safe(mem, tmp_mem,
1695 &process_info->userptr_valid_list,
1696 validate_list.head) {
1697 if (!atomic_read(&mem->invalid))
1698 continue; /* BO is still valid */
1702 if (amdgpu_bo_reserve(bo, true))
1704 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1705 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1706 amdgpu_bo_unreserve(bo);
1708 pr_err("%s: Failed to invalidate userptr BO\n",
1713 list_move_tail(&mem->validate_list.head,
1714 &process_info->userptr_inval_list);
1717 if (list_empty(&process_info->userptr_inval_list))
1718 return 0; /* All evicted userptr BOs were freed */
1720 /* Go through userptr_inval_list and update any invalid user_pages */
1721 list_for_each_entry(mem, &process_info->userptr_inval_list,
1722 validate_list.head) {
1723 invalid = atomic_read(&mem->invalid);
1725 /* BO hasn't been invalidated since the last
1726 * revalidation attempt. Keep its BO list.
1732 if (!mem->user_pages) {
1734 kvmalloc_array(bo->tbo.ttm->num_pages,
1735 sizeof(struct page *),
1736 GFP_KERNEL | __GFP_ZERO);
1737 if (!mem->user_pages) {
1738 pr_err("%s: Failed to allocate pages array\n",
1742 } else if (mem->user_pages[0]) {
1743 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1746 /* Get updated user pages */
1747 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1750 mem->user_pages[0] = NULL;
1751 pr_info("%s: Failed to get user pages: %d\n",
1753 /* Pretend it succeeded. It will fail later
1754 * with a VM fault if the GPU tries to access
1755 * it. Better than hanging indefinitely with
1756 * stalled user mode queues.
1760 /* Mark the BO as valid unless it was invalidated
1761 * again concurrently
1763 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1770 /* Validate invalid userptr BOs
1772 * Validates BOs on the userptr_inval_list, and moves them back to the
1773 * userptr_valid_list. Also updates GPUVM page tables with new page
1774 * addresses and waits for the page table updates to complete.
1776 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1778 struct amdgpu_bo_list_entry *pd_bo_list_entries;
1779 struct list_head resv_list, duplicates;
1780 struct ww_acquire_ctx ticket;
1781 struct amdgpu_sync sync;
1783 struct amdgpu_vm *peer_vm;
1784 struct kgd_mem *mem, *tmp_mem;
1785 struct amdgpu_bo *bo;
1786 struct ttm_operation_ctx ctx = { false, false };
1789 pd_bo_list_entries = kcalloc(process_info->n_vms,
1790 sizeof(struct amdgpu_bo_list_entry),
1792 if (!pd_bo_list_entries) {
1793 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1797 INIT_LIST_HEAD(&resv_list);
1798 INIT_LIST_HEAD(&duplicates);
1800 /* Get all the page directory BOs that need to be reserved */
1802 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1804 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1805 &pd_bo_list_entries[i++]);
1806 /* Add the userptr_inval_list entries to resv_list */
1807 list_for_each_entry(mem, &process_info->userptr_inval_list,
1808 validate_list.head) {
1809 list_add_tail(&mem->resv_list.head, &resv_list);
1810 mem->resv_list.bo = mem->validate_list.bo;
1811 mem->resv_list.shared = mem->validate_list.shared;
1814 /* Reserve all BOs and page tables for validation */
1815 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1816 WARN(!list_empty(&duplicates), "Duplicates should be empty");
1820 amdgpu_sync_create(&sync);
1822 /* Avoid triggering eviction fences when unmapping invalid
1823 * userptr BOs (waits for all fences, doesn't use
1826 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1828 amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1829 process_info->eviction_fence,
1832 ret = process_validate_vms(process_info);
1836 /* Validate BOs and update GPUVM page tables */
1837 list_for_each_entry_safe(mem, tmp_mem,
1838 &process_info->userptr_inval_list,
1839 validate_list.head) {
1840 struct kfd_bo_va_list *bo_va_entry;
1844 /* Copy pages array and validate the BO if we got user pages */
1845 if (mem->user_pages[0]) {
1846 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1848 amdgpu_bo_placement_from_domain(bo, mem->domain);
1849 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1851 pr_err("%s: failed to validate BO\n", __func__);
1856 /* Validate succeeded, now the BO owns the pages, free
1857 * our copy of the pointer array. Put this BO back on
1858 * the userptr_valid_list. If we need to revalidate
1859 * it, we need to start from scratch.
1861 kvfree(mem->user_pages);
1862 mem->user_pages = NULL;
1863 list_move_tail(&mem->validate_list.head,
1864 &process_info->userptr_valid_list);
1866 /* Update mapping. If the BO was not validated
1867 * (because we couldn't get user pages), this will
1868 * clear the page table entries, which will result in
1869 * VM faults if the GPU tries to access the invalid
1872 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1873 if (!bo_va_entry->is_mapped)
1876 ret = update_gpuvm_pte((struct amdgpu_device *)
1877 bo_va_entry->kgd_dev,
1878 bo_va_entry, &sync);
1880 pr_err("%s: update PTE failed\n", __func__);
1881 /* make sure this gets validated again */
1882 atomic_inc(&mem->invalid);
1888 /* Update page directories */
1889 ret = process_update_pds(process_info, &sync);
1892 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1894 amdgpu_bo_fence(peer_vm->root.base.bo,
1895 &process_info->eviction_fence->base, true);
1896 ttm_eu_backoff_reservation(&ticket, &resv_list);
1897 amdgpu_sync_wait(&sync, false);
1898 amdgpu_sync_free(&sync);
1900 kfree(pd_bo_list_entries);
1905 /* Worker callback to restore evicted userptr BOs
1907 * Tries to update and validate all userptr BOs. If successful and no
1908 * concurrent evictions happened, the queues are restarted. Otherwise,
1909 * reschedule for another attempt later.
1911 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1913 struct delayed_work *dwork = to_delayed_work(work);
1914 struct amdkfd_process_info *process_info =
1915 container_of(dwork, struct amdkfd_process_info,
1916 restore_userptr_work);
1917 struct task_struct *usertask;
1918 struct mm_struct *mm;
1921 evicted_bos = atomic_read(&process_info->evicted_bos);
1925 /* Reference task and mm in case of concurrent process termination */
1926 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1929 mm = get_task_mm(usertask);
1931 put_task_struct(usertask);
1935 mutex_lock(&process_info->lock);
1937 if (update_invalid_user_pages(process_info, mm))
1939 /* userptr_inval_list can be empty if all evicted userptr BOs
1940 * have been freed. In that case there is nothing to validate
1941 * and we can just restart the queues.
1943 if (!list_empty(&process_info->userptr_inval_list)) {
1944 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1945 goto unlock_out; /* Concurrent eviction, try again */
1947 if (validate_invalid_user_pages(process_info))
/* Final check for concurrent eviction and atomic update. If
1951 * another eviction happens after successful update, it will
1952 * be a first eviction that calls quiesce_mm. The eviction
1953 * reference counting inside KFD will handle this case.
1955 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1959 if (kgd2kfd->resume_mm(mm)) {
1960 pr_err("%s: Failed to resume KFD\n", __func__);
1961 /* No recovery from this failure. Probably the CP is
1962 * hanging. No point trying again.
1966 mutex_unlock(&process_info->lock);
1968 put_task_struct(usertask);
1970 /* If validation failed, reschedule another attempt */
1972 schedule_delayed_work(&process_info->restore_userptr_work,
1973 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1976 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1977 * KFD process identified by process_info
1979 * @process_info: amdkfd_process_info of the KFD process
 * After memory eviction, the restore thread calls this function. The function
 * should be called when the process is still valid. BO restore involves:
 *
 * 1. Release the old eviction fence and create a new one
 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate the PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7. Add the fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
1994 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1996 struct amdgpu_bo_list_entry *pd_bo_list;
1997 struct amdkfd_process_info *process_info = info;
1998 struct amdgpu_vm *peer_vm;
1999 struct kgd_mem *mem;
2000 struct bo_vm_reservation_context ctx;
2001 struct amdgpu_amdkfd_fence *new_fence;
2003 struct list_head duplicate_save;
2004 struct amdgpu_sync sync_obj;
2006 INIT_LIST_HEAD(&duplicate_save);
2007 INIT_LIST_HEAD(&ctx.list);
2008 INIT_LIST_HEAD(&ctx.duplicates);
2010 pd_bo_list = kcalloc(process_info->n_vms,
2011 sizeof(struct amdgpu_bo_list_entry),
2017 mutex_lock(&process_info->lock);
2018 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2020 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2022 /* Reserve all BOs and page tables/directory. Add all BOs from
2023 * kfd_bo_list to ctx.list
2025 list_for_each_entry(mem, &process_info->kfd_bo_list,
2026 validate_list.head) {
2028 list_add_tail(&mem->resv_list.head, &ctx.list);
2029 mem->resv_list.bo = mem->validate_list.bo;
2030 mem->resv_list.shared = mem->validate_list.shared;
2033 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2034 false, &duplicate_save);
2036 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2037 goto ttm_reserve_fail;
2040 amdgpu_sync_create(&sync_obj);
2042 /* Validate PDs and PTs */
2043 ret = process_validate_vms(process_info);
2045 goto validate_map_fail;
2047 /* Wait for PD/PTs validate to finish */
2048 /* FIXME: I think this isn't needed */
2049 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2051 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2053 ttm_bo_wait(&bo->tbo, false, false);
2056 /* Validate BOs and map them to GPUVM (update VM page tables). */
2057 list_for_each_entry(mem, &process_info->kfd_bo_list,
2058 validate_list.head) {
2060 struct amdgpu_bo *bo = mem->bo;
2061 uint32_t domain = mem->domain;
2062 struct kfd_bo_va_list *bo_va_entry;
2064 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2066 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2067 goto validate_map_fail;
2070 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2072 ret = update_gpuvm_pte((struct amdgpu_device *)
2073 bo_va_entry->kgd_dev,
2077 pr_debug("Memory eviction: update PTE failed. Try again\n");
2078 goto validate_map_fail;
2083 /* Update page directories */
2084 ret = process_update_pds(process_info, &sync_obj);
2086 pr_debug("Memory eviction: update PDs failed. Try again\n");
2087 goto validate_map_fail;
2090 amdgpu_sync_wait(&sync_obj, false);
2092 /* Release old eviction fence and create new one, because fence only
2093 * goes from unsignaled to signaled, fence cannot be reused.
2094 * Use context and mm from the old fence.
2096 new_fence = amdgpu_amdkfd_fence_create(
2097 process_info->eviction_fence->base.context,
2098 process_info->eviction_fence->mm);
2100 pr_err("Failed to create eviction fence\n");
2102 goto validate_map_fail;
2104 dma_fence_put(&process_info->eviction_fence->base);
2105 process_info->eviction_fence = new_fence;
2106 *ef = dma_fence_get(&new_fence->base);
2108 /* Wait for validate to finish and attach new eviction fence */
2109 list_for_each_entry(mem, &process_info->kfd_bo_list,
2111 ttm_bo_wait(&mem->bo->tbo, false, false);
2112 list_for_each_entry(mem, &process_info->kfd_bo_list,
2114 amdgpu_bo_fence(mem->bo,
2115 &process_info->eviction_fence->base, true);
2117 /* Attach eviction fence to PD / PT BOs */
2118 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2120 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2122 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2126 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2127 amdgpu_sync_free(&sync_obj);
2129 mutex_unlock(&process_info->lock);