[linux.git] blob bc4ec6b20a87191a926f3573cef1e45377667e72 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/dma-buf.h>
26 #include <linux/list.h>
27 #include <linux/pagemap.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/task.h>
30
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_amdkfd.h"
34 #include "amdgpu_dma_buf.h"
35
36 /* Special VM and GART address alignment needed for VI pre-Fiji due to
37  * a HW bug.
38  */
39 #define VI_BO_SIZE_ALIGN (0x8000)
40
41 /* BO flag to indicate a KFD userptr BO */
42 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
43
44 /* Userptr restore delay, just long enough to allow consecutive VM
45  * changes to accumulate
46  */
47 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
48
49 /* Impose limit on how much memory KFD can use */
50 static struct {
51         uint64_t max_system_mem_limit;
52         uint64_t max_ttm_mem_limit;
53         int64_t system_mem_used;
54         int64_t ttm_mem_used;
55         spinlock_t mem_limit_lock;
56 } kfd_mem_limit;
57
58 /* Struct used for amdgpu_amdkfd_bo_validate */
59 struct amdgpu_vm_parser {
60         uint32_t        domain;
61         bool            wait;
62 };
63
64 static const char * const domain_bit_to_string[] = {
65                 "CPU",
66                 "GTT",
67                 "VRAM",
68                 "GDS",
69                 "GWS",
70                 "OA"
71 };
72
73 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
74
75 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
76
77
78 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
79 {
80         return (struct amdgpu_device *)kgd;
81 }
82
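/* Return true if the BO has not yet been added to @avm, i.e. no bo_va
 * entry for that VM exists in the kgd_mem's bo_va_list.
 */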
83 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
84                 struct kgd_mem *mem)
85 {
86         struct kfd_bo_va_list *entry;
87
88         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
89                 if (entry->bo_va->base.vm == avm)
90                         return false;
91
92         return true;
93 }
94
95 /* Set memory usage limits. Currently, the limits are:
96  *  System (TTM + userptr) memory - 3/4th System RAM
97  *  TTM memory - 3/8th System RAM
98  */
99 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
100 {
101         struct sysinfo si;
102         uint64_t mem;
103
104         si_meminfo(&si);
105         mem = si.totalram - si.totalhigh;
106         mem *= si.mem_unit;
107
108         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
109         kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
110         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
111         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
112                 (kfd_mem_limit.max_system_mem_limit >> 20),
113                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
114 }
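/* Worked example (illustration only, not taken from the driver): with
 * 16 GiB of usable system RAM, (mem >> 1) + (mem >> 2) gives a 12 GiB
 * system memory limit and (mem >> 1) - (mem >> 3) gives a 6 GiB TTM
 * limit, i.e. the 3/4 and 3/8 fractions stated above.
 */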
115
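/* Check an allocation of @size in @domain against the KFD memory limits
 * and account for it on success. reserved_for_pt, 1/512 of
 * amdgpu_amdkfd_total_mem_size, is VRAM held back for page tables and
 * never handed out to KFD allocations.
 */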
116 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
117                 uint64_t size, u32 domain, bool sg)
118 {
119         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
120         uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
121         int ret = 0;
122
123         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
124                                        sizeof(struct amdgpu_bo));
125
126         vram_needed = 0;
127         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
128                 /* TTM GTT memory */
129                 system_mem_needed = acc_size + size;
130                 ttm_mem_needed = acc_size + size;
131         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
132                 /* Userptr */
133                 system_mem_needed = acc_size + size;
134                 ttm_mem_needed = acc_size;
135         } else {
136                 /* VRAM and SG */
137                 system_mem_needed = acc_size;
138                 ttm_mem_needed = acc_size;
139                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
140                         vram_needed = size;
141         }
142
143         spin_lock(&kfd_mem_limit.mem_limit_lock);
144
145         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
146              kfd_mem_limit.max_system_mem_limit) ||
147             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
148              kfd_mem_limit.max_ttm_mem_limit) ||
149             (adev->kfd.vram_used + vram_needed >
150              adev->gmc.real_vram_size - reserved_for_pt)) {
151                 ret = -ENOMEM;
152         } else {
153                 kfd_mem_limit.system_mem_used += system_mem_needed;
154                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
155                 adev->kfd.vram_used += vram_needed;
156         }
157
158         spin_unlock(&kfd_mem_limit.mem_limit_lock);
159         return ret;
160 }
161
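/* Undo the accounting done by amdgpu_amdkfd_reserve_mem_limit() for a
 * BO of the given @size, @domain and @sg type; the WARN_ONCEs flag
 * unbalanced reserve/unreserve pairs.
 */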
162 static void unreserve_mem_limit(struct amdgpu_device *adev,
163                 uint64_t size, u32 domain, bool sg)
164 {
165         size_t acc_size;
166
167         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
168                                        sizeof(struct amdgpu_bo));
169
170         spin_lock(&kfd_mem_limit.mem_limit_lock);
171         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
172                 kfd_mem_limit.system_mem_used -= (acc_size + size);
173                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
174         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
175                 kfd_mem_limit.system_mem_used -= (acc_size + size);
176                 kfd_mem_limit.ttm_mem_used -= acc_size;
177         } else {
178                 kfd_mem_limit.system_mem_used -= acc_size;
179                 kfd_mem_limit.ttm_mem_used -= acc_size;
180                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
181                         adev->kfd.vram_used -= size;
182                         WARN_ONCE(adev->kfd.vram_used < 0,
183                                   "kfd VRAM memory accounting unbalanced");
184                 }
185         }
186         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
187                   "kfd system memory accounting unbalanced");
188         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
189                   "kfd TTM memory accounting unbalanced");
190
191         spin_unlock(&kfd_mem_limit.mem_limit_lock);
192 }
193
194 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
195 {
196         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
197         u32 domain = bo->preferred_domains;
198         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
199
200         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
201                 domain = AMDGPU_GEM_DOMAIN_CPU;
202                 sg = false;
203         }
204
205         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
206 }
207
208
209 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
210  *  reservation object.
211  *
212  * @bo: [IN] Remove eviction fence(s) from this BO
213  * @ef: [IN] This eviction fence is removed if it
214  *  is present in the shared list.
215  *
216  * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
217  */
218 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
219                                         struct amdgpu_amdkfd_fence *ef)
220 {
221         struct dma_resv *resv = bo->tbo.base.resv;
222         struct dma_resv_list *old, *new;
223         unsigned int i, j, k;
224
225         if (!ef)
226                 return -EINVAL;
227
228         old = dma_resv_get_list(resv);
229         if (!old)
230                 return 0;
231
232         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
233                       GFP_KERNEL);
234         if (!new)
235                 return -ENOMEM;
236
237         /* Go through all the shared fences in the reservation object and sort
238          * the interesting ones to the end of the list.
239          */
240         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
241                 struct dma_fence *f;
242
243                 f = rcu_dereference_protected(old->shared[i],
244                                               dma_resv_held(resv));
245
246                 if (f->context == ef->base.context)
247                         RCU_INIT_POINTER(new->shared[--j], f);
248                 else
249                         RCU_INIT_POINTER(new->shared[k++], f);
250         }
251         new->shared_max = old->shared_max;
252         new->shared_count = k;
253
254         rcu_assign_pointer(resv->fence, new);
255
256         /* Drop the references to the removed fences */
257         for (i = j, k = 0; i < old->shared_count; ++i) {
258                 struct dma_fence *f;
259
260                 f = rcu_dereference_protected(new->shared[i],
261                                               dma_resv_held(resv));
262                 dma_fence_put(f);
263         }
264         kfree_rcu(old, rcu);
265
266         return 0;
267 }
268
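/* Validate a regular (non-userptr) BO into @domain and, if @wait is set,
 * wait for the fences on its reservation object before returning.
 */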
269 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
270                                      bool wait)
271 {
272         struct ttm_operation_ctx ctx = { false, false };
273         int ret;
274
275         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
276                  "Called with userptr BO"))
277                 return -EINVAL;
278
279         amdgpu_bo_placement_from_domain(bo, domain);
280
281         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
282         if (ret)
283                 goto validate_fail;
284         if (wait)
285                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
286
287 validate_fail:
288         return ret;
289 }
290
291 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
292 {
293         struct amdgpu_vm_parser *p = param;
294
295         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
296 }
297
298 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
299  *
300  * Page directories are not updated here because huge page handling
301  * during page table updates can invalidate page directory entries
302  * again. Page directories are only updated after updating page
303  * tables.
304  */
305 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
306 {
307         struct amdgpu_bo *pd = vm->root.base.bo;
308         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
309         struct amdgpu_vm_parser param;
310         int ret;
311
312         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
313         param.wait = false;
314
315         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
316                                         &param);
317         if (ret) {
318                 pr_err("amdgpu: failed to validate PT BOs\n");
319                 return ret;
320         }
321
322         ret = amdgpu_amdkfd_validate(&param, pd);
323         if (ret) {
324                 pr_err("amdgpu: failed to validate PD\n");
325                 return ret;
326         }
327
328         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
329
330         if (vm->use_cpu_for_update) {
331                 ret = amdgpu_bo_kmap(pd, NULL);
332                 if (ret) {
333                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
334                         return ret;
335                 }
336         }
337
338         return 0;
339 }
340
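/* Update the page directories of @vm and add the resulting fence to
 * @sync so callers can wait for the PD update to complete.
 */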
341 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
342 {
343         struct amdgpu_bo *pd = vm->root.base.bo;
344         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
345         int ret;
346
347         ret = amdgpu_vm_update_directories(adev, vm);
348         if (ret)
349                 return ret;
350
351         return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
352 }
353
354 /* add_bo_to_vm - Add a BO to a VM
355  *
356  * Everything that needs to be done only once when a BO is first added
357  * to a VM. It can later be mapped and unmapped many times without
358  * repeating these steps.
359  *
360  * 1. Allocate and initialize BO VA entry data structure
361  * 2. Add BO to the VM
362  * 3. Determine ASIC-specific PTE flags
363  * 4. Alloc page tables and directories if needed
364  * 4a.  Validate new page tables and directories
365  */
366 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
367                 struct amdgpu_vm *vm, bool is_aql,
368                 struct kfd_bo_va_list **p_bo_va_entry)
369 {
370         int ret;
371         struct kfd_bo_va_list *bo_va_entry;
372         struct amdgpu_bo *bo = mem->bo;
373         uint64_t va = mem->va;
374         struct list_head *list_bo_va = &mem->bo_va_list;
375         unsigned long bo_size = bo->tbo.mem.size;
376
377         if (!va) {
378                 pr_err("Invalid VA when adding BO to VM\n");
379                 return -EINVAL;
380         }
381
382         if (is_aql)
383                 va += bo_size;
384
385         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
386         if (!bo_va_entry)
387                 return -ENOMEM;
388
389         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
390                         va + bo_size, vm);
391
392         /* Add BO to VM internal data structures */
393         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
394         if (!bo_va_entry->bo_va) {
395                 ret = -EINVAL;
396                 pr_err("Failed to add BO object to VM. ret == %d\n",
397                                 ret);
398                 goto err_vmadd;
399         }
400
401         bo_va_entry->va = va;
402         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
403                                                          mem->mapping_flags);
404         bo_va_entry->kgd_dev = (void *)adev;
405         list_add(&bo_va_entry->bo_list, list_bo_va);
406
407         if (p_bo_va_entry)
408                 *p_bo_va_entry = bo_va_entry;
409
410         /* Allocate and validate page tables if needed */
411         ret = vm_validate_pt_pd_bos(vm);
412         if (ret) {
413                 pr_err("validate_pt_pd_bos() failed\n");
414                 goto err_alloc_pts;
415         }
416
417         return 0;
418
419 err_alloc_pts:
420         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
421         list_del(&bo_va_entry->bo_list);
422 err_vmadd:
423         kfree(bo_va_entry);
424         return ret;
425 }
426
427 static void remove_bo_from_vm(struct amdgpu_device *adev,
428                 struct kfd_bo_va_list *entry, unsigned long size)
429 {
430         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
431                         entry->va,
432                         entry->va + size, entry);
433         amdgpu_vm_bo_rmv(adev, entry->bo_va);
434         list_del(&entry->bo_list);
435         kfree(entry);
436 }
437
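/* Put the kgd_mem on the per-process BO list (userptr BOs go on
 * userptr_valid_list, everything else on kfd_bo_list) so the eviction
 * and restore workers can find and revalidate it later.
 */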
438 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
439                                 struct amdkfd_process_info *process_info,
440                                 bool userptr)
441 {
442         struct ttm_validate_buffer *entry = &mem->validate_list;
443         struct amdgpu_bo *bo = mem->bo;
444
445         INIT_LIST_HEAD(&entry->head);
446         entry->num_shared = 1;
447         entry->bo = &bo->tbo;
448         mutex_lock(&process_info->lock);
449         if (userptr)
450                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
451         else
452                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
453         mutex_unlock(&process_info->lock);
454 }
455
456 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
457                 struct amdkfd_process_info *process_info)
458 {
459         struct ttm_validate_buffer *bo_list_entry;
460
461         bo_list_entry = &mem->validate_list;
462         mutex_lock(&process_info->lock);
463         list_del(&bo_list_entry->head);
464         mutex_unlock(&process_info->lock);
465 }
466
467 /* Initializes user pages. It registers the MMU notifier and validates
468  * the userptr BO in the GTT domain.
469  *
470  * The BO must already be on the userptr_valid_list. Otherwise an
471  * eviction and restore may happen that leaves the new BO unmapped
472  * with the user mode queues running.
473  *
474  * Takes the process_info->lock to protect against concurrent restore
475  * workers.
476  *
477  * Returns 0 for success, negative errno for errors.
478  */
479 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
480                            uint64_t user_addr)
481 {
482         struct amdkfd_process_info *process_info = mem->process_info;
483         struct amdgpu_bo *bo = mem->bo;
484         struct ttm_operation_ctx ctx = { true, false };
485         int ret = 0;
486
487         mutex_lock(&process_info->lock);
488
489         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
490         if (ret) {
491                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
492                 goto out;
493         }
494
495         ret = amdgpu_mn_register(bo, user_addr);
496         if (ret) {
497                 pr_err("%s: Failed to register MMU notifier: %d\n",
498                        __func__, ret);
499                 goto out;
500         }
501
502         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
503         if (ret) {
504                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
505                 goto unregister_out;
506         }
507
508         ret = amdgpu_bo_reserve(bo, true);
509         if (ret) {
510                 pr_err("%s: Failed to reserve BO\n", __func__);
511                 goto release_out;
512         }
513         amdgpu_bo_placement_from_domain(bo, mem->domain);
514         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
515         if (ret)
516                 pr_err("%s: failed to validate BO\n", __func__);
517         amdgpu_bo_unreserve(bo);
518
519 release_out:
520         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
521 unregister_out:
522         if (ret)
523                 amdgpu_mn_unregister(bo);
524 out:
525         mutex_unlock(&process_info->lock);
526         return ret;
527 }
528
529 /* Reserving a BO and its page table BOs must happen atomically to
530  * avoid deadlocks. Some operations update multiple VMs at once. Track
531  * all the reservation info in a context structure. Optionally a sync
532  * object can track VM updates.
533  */
534 struct bo_vm_reservation_context {
535         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
536         unsigned int n_vms;                 /* Number of VMs reserved       */
537         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
538         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
539         struct list_head list, duplicates;  /* BO lists                     */
540         struct amdgpu_sync *sync;           /* Pointer to sync object       */
541         bool reserved;                      /* Whether BOs are reserved     */
542 };
543
544 enum bo_vm_match {
545         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
546         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
547         BO_VM_ALL,              /* Match all VMs a BO was added to    */
548 };
549
550 /**
551  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
552  * @mem: KFD BO structure.
553  * @vm: the VM to reserve.
554  * @ctx: the struct that will be used in unreserve_bo_and_vms().
555  */
556 static int reserve_bo_and_vm(struct kgd_mem *mem,
557                               struct amdgpu_vm *vm,
558                               struct bo_vm_reservation_context *ctx)
559 {
560         struct amdgpu_bo *bo = mem->bo;
561         int ret;
562
563         WARN_ON(!vm);
564
565         ctx->reserved = false;
566         ctx->n_vms = 1;
567         ctx->sync = &mem->sync;
568
569         INIT_LIST_HEAD(&ctx->list);
570         INIT_LIST_HEAD(&ctx->duplicates);
571
572         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
573         if (!ctx->vm_pd)
574                 return -ENOMEM;
575
576         ctx->kfd_bo.priority = 0;
577         ctx->kfd_bo.tv.bo = &bo->tbo;
578         ctx->kfd_bo.tv.num_shared = 1;
579         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
580
581         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
582
583         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
584                                      false, &ctx->duplicates, true);
585         if (!ret)
586                 ctx->reserved = true;
587         else {
588                 pr_err("Failed to reserve buffers in ttm\n");
589                 kfree(ctx->vm_pd);
590                 ctx->vm_pd = NULL;
591         }
592
593         return ret;
594 }
595
596 /**
597  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
598  * @mem: KFD BO structure.
599  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
600  * are reserved. Otherwise, only the given VM is reserved.
601  * @map_type: the mapping status that will be used to filter the VMs.
602  * @ctx: the struct that will be used in unreserve_bo_and_vms().
603  *
604  * Returns 0 for success, negative for failure.
605  */
606 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
607                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
608                                 struct bo_vm_reservation_context *ctx)
609 {
610         struct amdgpu_bo *bo = mem->bo;
611         struct kfd_bo_va_list *entry;
612         unsigned int i;
613         int ret;
614
615         ctx->reserved = false;
616         ctx->n_vms = 0;
617         ctx->vm_pd = NULL;
618         ctx->sync = &mem->sync;
619
620         INIT_LIST_HEAD(&ctx->list);
621         INIT_LIST_HEAD(&ctx->duplicates);
622
623         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
624                 if ((vm && vm != entry->bo_va->base.vm) ||
625                         (entry->is_mapped != map_type
626                         && map_type != BO_VM_ALL))
627                         continue;
628
629                 ctx->n_vms++;
630         }
631
632         if (ctx->n_vms != 0) {
633                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
634                                      GFP_KERNEL);
635                 if (!ctx->vm_pd)
636                         return -ENOMEM;
637         }
638
639         ctx->kfd_bo.priority = 0;
640         ctx->kfd_bo.tv.bo = &bo->tbo;
641         ctx->kfd_bo.tv.num_shared = 1;
642         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
643
644         i = 0;
645         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
646                 if ((vm && vm != entry->bo_va->base.vm) ||
647                         (entry->is_mapped != map_type
648                         && map_type != BO_VM_ALL))
649                         continue;
650
651                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
652                                 &ctx->vm_pd[i]);
653                 i++;
654         }
655
656         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
657                                      false, &ctx->duplicates, true);
658         if (!ret)
659                 ctx->reserved = true;
660         else
661                 pr_err("Failed to reserve buffers in ttm.\n");
662
663         if (ret) {
664                 kfree(ctx->vm_pd);
665                 ctx->vm_pd = NULL;
666         }
667
668         return ret;
669 }
670
671 /**
672  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
673  * @ctx: Reservation context to unreserve
674  * @wait: Optionally wait for a sync object representing pending VM updates
675  * @intr: Whether the wait is interruptible
676  *
677  * Also frees any resources allocated in
678  * reserve_bo_and_(cond_)vm(s). Returns the status from
679  * amdgpu_sync_wait.
680  */
681 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
682                                  bool wait, bool intr)
683 {
684         int ret = 0;
685
686         if (wait)
687                 ret = amdgpu_sync_wait(ctx->sync, intr);
688
689         if (ctx->reserved)
690                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
691         kfree(ctx->vm_pd);
692
693         ctx->sync = NULL;
694
695         ctx->reserved = false;
696         ctx->vm_pd = NULL;
697
698         return ret;
699 }
700
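/* Remove the VA mapping of @entry from its VM, clear the freed page
 * table entries and add the resulting update fence to @sync.
 */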
701 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
702                                 struct kfd_bo_va_list *entry,
703                                 struct amdgpu_sync *sync)
704 {
705         struct amdgpu_bo_va *bo_va = entry->bo_va;
706         struct amdgpu_vm *vm = bo_va->base.vm;
707
708         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
709
710         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
711
712         amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
713
714         return 0;
715 }
716
717 static int update_gpuvm_pte(struct amdgpu_device *adev,
718                 struct kfd_bo_va_list *entry,
719                 struct amdgpu_sync *sync)
720 {
721         int ret;
722         struct amdgpu_bo_va *bo_va = entry->bo_va;
723
724         /* Update the page tables  */
725         ret = amdgpu_vm_bo_update(adev, bo_va, false);
726         if (ret) {
727                 pr_err("amdgpu_vm_bo_update failed\n");
728                 return ret;
729         }
730
731         return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
732 }
733
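/* Create the VA mapping for @entry in its VM and, unless @no_update_pte
 * is set (used for not-yet-valid userptr BOs), write the page table
 * entries right away.
 */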
734 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
735                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
736                 bool no_update_pte)
737 {
738         int ret;
739
740         /* Set virtual address for the allocation */
741         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
742                                amdgpu_bo_size(entry->bo_va->base.bo),
743                                entry->pte_flags);
744         if (ret) {
745                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
746                                 entry->va, ret);
747                 return ret;
748         }
749
750         if (no_update_pte)
751                 return 0;
752
753         ret = update_gpuvm_pte(adev, entry, sync);
754         if (ret) {
755                 pr_err("update_gpuvm_pte() failed\n");
756                 goto update_gpuvm_pte_failed;
757         }
758
759         return 0;
760
761 update_gpuvm_pte_failed:
762         unmap_bo_from_gpuvm(adev, entry, sync);
763         return ret;
764 }
765
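/* Build a single-entry sg_table describing an already known bus address
 * (doorbell or MMIO page) so it can be wrapped in a TTM SG BO.
 */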
766 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
767 {
768         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
769
770         if (!sg)
771                 return NULL;
772         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
773                 kfree(sg);
774                 return NULL;
775         }
776         sg->sgl->dma_address = addr;
777         sg->sgl->length = size;
778 #ifdef CONFIG_NEED_SG_DMA_LENGTH
779         sg->sgl->dma_length = size;
780 #endif
781         return sg;
782 }
783
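/* Validate the page table and page directory BOs of every VM belonging
 * to this KFD process.
 */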
784 static int process_validate_vms(struct amdkfd_process_info *process_info)
785 {
786         struct amdgpu_vm *peer_vm;
787         int ret;
788
789         list_for_each_entry(peer_vm, &process_info->vm_list_head,
790                             vm_list_node) {
791                 ret = vm_validate_pt_pd_bos(peer_vm);
792                 if (ret)
793                         return ret;
794         }
795
796         return 0;
797 }
798
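/* Collect the fences from every per-VM page directory reservation
 * object of the process into @sync.
 */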
799 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
800                                  struct amdgpu_sync *sync)
801 {
802         struct amdgpu_vm *peer_vm;
803         int ret;
804
805         list_for_each_entry(peer_vm, &process_info->vm_list_head,
806                             vm_list_node) {
807                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
808
809                 ret = amdgpu_sync_resv(NULL,
810                                         sync, pd->tbo.base.resv,
811                                         AMDGPU_FENCE_OWNER_KFD, false);
812                 if (ret)
813                         return ret;
814         }
815
816         return 0;
817 }
818
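/* Update the page directories of every VM of the process and track the
 * update fences in @sync.
 */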
819 static int process_update_pds(struct amdkfd_process_info *process_info,
820                               struct amdgpu_sync *sync)
821 {
822         struct amdgpu_vm *peer_vm;
823         int ret;
824
825         list_for_each_entry(peer_vm, &process_info->vm_list_head,
826                             vm_list_node) {
827                 ret = vm_update_pds(peer_vm, sync);
828                 if (ret)
829                         return ret;
830         }
831
832         return 0;
833 }
834
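/* Initialize the KFD side of a VM: allocate or reuse the per-process
 * info structure (creating the eviction fence on first use), validate
 * the page directory, attach the eviction fence to it and link the VM
 * into the process VM list.
 */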
835 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
836                        struct dma_fence **ef)
837 {
838         struct amdkfd_process_info *info = NULL;
839         int ret;
840
841         if (!*process_info) {
842                 info = kzalloc(sizeof(*info), GFP_KERNEL);
843                 if (!info)
844                         return -ENOMEM;
845
846                 mutex_init(&info->lock);
847                 INIT_LIST_HEAD(&info->vm_list_head);
848                 INIT_LIST_HEAD(&info->kfd_bo_list);
849                 INIT_LIST_HEAD(&info->userptr_valid_list);
850                 INIT_LIST_HEAD(&info->userptr_inval_list);
851
852                 info->eviction_fence =
853                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
854                                                    current->mm);
855                 if (!info->eviction_fence) {
856                         pr_err("Failed to create eviction fence\n");
857                         ret = -ENOMEM;
858                         goto create_evict_fence_fail;
859                 }
860
861                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
862                 atomic_set(&info->evicted_bos, 0);
863                 INIT_DELAYED_WORK(&info->restore_userptr_work,
864                                   amdgpu_amdkfd_restore_userptr_worker);
865
866                 *process_info = info;
867                 *ef = dma_fence_get(&info->eviction_fence->base);
868         }
869
870         vm->process_info = *process_info;
871
872         /* Validate page directory and attach eviction fence */
873         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
874         if (ret)
875                 goto reserve_pd_fail;
876         ret = vm_validate_pt_pd_bos(vm);
877         if (ret) {
878                 pr_err("validate_pt_pd_bos() failed\n");
879                 goto validate_pd_fail;
880         }
881         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
882                                   AMDGPU_FENCE_OWNER_KFD, false);
883         if (ret)
884                 goto wait_pd_fail;
885         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
886         if (ret)
887                 goto reserve_shared_fail;
888         amdgpu_bo_fence(vm->root.base.bo,
889                         &vm->process_info->eviction_fence->base, true);
890         amdgpu_bo_unreserve(vm->root.base.bo);
891
892         /* Update process info */
893         mutex_lock(&vm->process_info->lock);
894         list_add_tail(&vm->vm_list_node,
895                         &(vm->process_info->vm_list_head));
896         vm->process_info->n_vms++;
897         mutex_unlock(&vm->process_info->lock);
898
899         return 0;
900
901 reserve_shared_fail:
902 wait_pd_fail:
903 validate_pd_fail:
904         amdgpu_bo_unreserve(vm->root.base.bo);
905 reserve_pd_fail:
906         vm->process_info = NULL;
907         if (info) {
908                 /* Two fence references: one in info and one in *ef */
909                 dma_fence_put(&info->eviction_fence->base);
910                 dma_fence_put(*ef);
911                 *ef = NULL;
912                 *process_info = NULL;
913                 put_pid(info->pid);
914 create_evict_fence_fail:
915                 mutex_destroy(&info->lock);
916                 kfree(info);
917         }
918         return ret;
919 }
920
921 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
922                                           void **vm, void **process_info,
923                                           struct dma_fence **ef)
924 {
925         struct amdgpu_device *adev = get_amdgpu_device(kgd);
926         struct amdgpu_vm *new_vm;
927         int ret;
928
929         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
930         if (!new_vm)
931                 return -ENOMEM;
932
933         /* Initialize AMDGPU part of the VM */
934         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
935         if (ret) {
936                 pr_err("Failed init vm ret %d\n", ret);
937                 goto amdgpu_vm_init_fail;
938         }
939
940         /* Initialize KFD part of the VM and process info */
941         ret = init_kfd_vm(new_vm, process_info, ef);
942         if (ret)
943                 goto init_kfd_vm_fail;
944
945         *vm = (void *) new_vm;
946
947         return 0;
948
949 init_kfd_vm_fail:
950         amdgpu_vm_fini(adev, new_vm);
951 amdgpu_vm_init_fail:
952         kfree(new_vm);
953         return ret;
954 }
955
956 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
957                                            struct file *filp, unsigned int pasid,
958                                            void **vm, void **process_info,
959                                            struct dma_fence **ef)
960 {
961         struct amdgpu_device *adev = get_amdgpu_device(kgd);
962         struct drm_file *drm_priv = filp->private_data;
963         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
964         struct amdgpu_vm *avm = &drv_priv->vm;
965         int ret;
966
967         /* Already a compute VM? */
968         if (avm->process_info)
969                 return -EINVAL;
970
971         /* Convert VM into a compute VM */
972         ret = amdgpu_vm_make_compute(adev, avm, pasid);
973         if (ret)
974                 return ret;
975
976         /* Initialize KFD part of the VM and process info */
977         ret = init_kfd_vm(avm, process_info, ef);
978         if (ret)
979                 return ret;
980
981         *vm = (void *)avm;
982
983         return 0;
984 }
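/* Typical call sequence from the KFD side (illustrative sketch only,
 * not a verbatim caller):
 *
 *   amdgpu_amdkfd_gpuvm_acquire_process_vm(kgd, filp, pasid,
 *                                          &vm, &process_info, &ef);
 *   amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm,
 *                                           &mem, &offset, flags);
 *   amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *   ...
 *   amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *   amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
 */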
985
986 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
987                                     struct amdgpu_vm *vm)
988 {
989         struct amdkfd_process_info *process_info = vm->process_info;
990         struct amdgpu_bo *pd = vm->root.base.bo;
991
992         if (!process_info)
993                 return;
994
995         /* Release eviction fence from PD */
996         amdgpu_bo_reserve(pd, false);
997         amdgpu_bo_fence(pd, NULL, false);
998         amdgpu_bo_unreserve(pd);
999
1000         /* Update process info */
1001         mutex_lock(&process_info->lock);
1002         process_info->n_vms--;
1003         list_del(&vm->vm_list_node);
1004         mutex_unlock(&process_info->lock);
1005
1006         /* Release per-process resources when last compute VM is destroyed */
1007         if (!process_info->n_vms) {
1008                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1009                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1010                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1011
1012                 dma_fence_put(&process_info->eviction_fence->base);
1013                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1014                 put_pid(process_info->pid);
1015                 mutex_destroy(&process_info->lock);
1016                 kfree(process_info);
1017         }
1018 }
1019
1020 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1021 {
1022         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1023         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1024
1025         if (WARN_ON(!kgd || !vm))
1026                 return;
1027
1028         pr_debug("Destroying process vm %p\n", vm);
1029
1030         /* Release the VM context */
1031         amdgpu_vm_fini(adev, avm);
1032         kfree(vm);
1033 }
1034
1035 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1036 {
1037         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1038         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1039
1040         if (WARN_ON(!kgd || !vm))
1041                 return;
1042
1043         pr_debug("Releasing process vm %p\n", vm);
1044
1045         /* The original pasid of the amdgpu vm was already released
1046          * when the amdgpu vm was converted to a compute vm. The
1047          * current pasid is managed by KFD and will be released on
1048          * KFD process destroy. Set the amdgpu pasid to 0 to avoid a
1049          * duplicate release.
1050          */
1051         amdgpu_vm_release_compute(adev, avm);
1052 }
1053
1054 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1055 {
1056         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1057         struct amdgpu_bo *pd = avm->root.base.bo;
1058         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1059
1060         if (adev->asic_type < CHIP_VEGA10)
1061                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1062         return avm->pd_phys_addr;
1063 }
1064
1065 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1066                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1067                 void *vm, struct kgd_mem **mem,
1068                 uint64_t *offset, uint32_t flags)
1069 {
1070         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1071         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1072         enum ttm_bo_type bo_type = ttm_bo_type_device;
1073         struct sg_table *sg = NULL;
1074         uint64_t user_addr = 0;
1075         struct amdgpu_bo *bo;
1076         struct amdgpu_bo_param bp;
1077         int byte_align;
1078         u32 domain, alloc_domain;
1079         u64 alloc_flags;
1080         uint32_t mapping_flags;
1081         int ret;
1082
1083         /*
1084          * Check on which domain to allocate BO
1085          */
1086         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1087                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1088                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1089                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1090                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1091                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1092         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1093                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1094                 alloc_flags = 0;
1095         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1096                 domain = AMDGPU_GEM_DOMAIN_GTT;
1097                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1098                 alloc_flags = 0;
1099                 if (!offset || !*offset)
1100                         return -EINVAL;
1101                 user_addr = *offset;
1102         } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1103                         ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1104                 domain = AMDGPU_GEM_DOMAIN_GTT;
1105                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1106                 bo_type = ttm_bo_type_sg;
1107                 alloc_flags = 0;
1108                 if (size > UINT_MAX)
1109                         return -EINVAL;
1110                 sg = create_doorbell_sg(*offset, size);
1111                 if (!sg)
1112                         return -ENOMEM;
1113         } else {
1114                 return -EINVAL;
1115         }
1116
1117         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1118         if (!*mem) {
1119                 ret = -ENOMEM;
1120                 goto err;
1121         }
1122         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1123         mutex_init(&(*mem)->lock);
1124         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1125
1126         /* Workaround for AQL queue wraparound bug. Map the same
1127          * memory twice. That means we only actually allocate half
1128          * the memory.
1129          */
1130         if ((*mem)->aql_queue)
1131                 size = size >> 1;
1132
1133         /* Workaround for TLB bug on older VI chips */
1134         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1135                         adev->asic_type != CHIP_FIJI &&
1136                         adev->asic_type != CHIP_POLARIS10 &&
1137                         adev->asic_type != CHIP_POLARIS11 &&
1138                         adev->asic_type != CHIP_POLARIS12) ?
1139                         VI_BO_SIZE_ALIGN : 1;
1140
1141         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1142         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1143                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1144         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1145                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1146         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1147                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1148         else
1149                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1150         (*mem)->mapping_flags = mapping_flags;
1151
1152         amdgpu_sync_create(&(*mem)->sync);
1153
1154         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1155         if (ret) {
1156                 pr_debug("Insufficient system memory\n");
1157                 goto err_reserve_limit;
1158         }
1159
1160         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1161                         va, size, domain_string(alloc_domain));
1162
1163         memset(&bp, 0, sizeof(bp));
1164         bp.size = size;
1165         bp.byte_align = byte_align;
1166         bp.domain = alloc_domain;
1167         bp.flags = alloc_flags;
1168         bp.type = bo_type;
1169         bp.resv = NULL;
1170         ret = amdgpu_bo_create(adev, &bp, &bo);
1171         if (ret) {
1172                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1173                                 domain_string(alloc_domain), ret);
1174                 goto err_bo_create;
1175         }
1176         if (bo_type == ttm_bo_type_sg) {
1177                 bo->tbo.sg = sg;
1178                 bo->tbo.ttm->sg = sg;
1179         }
1180         bo->kfd_bo = *mem;
1181         (*mem)->bo = bo;
1182         if (user_addr)
1183                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1184
1185         (*mem)->va = va;
1186         (*mem)->domain = domain;
1187         (*mem)->mapped_to_gpu_memory = 0;
1188         (*mem)->process_info = avm->process_info;
1189         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1190
1191         if (user_addr) {
1192                 ret = init_user_pages(*mem, current->mm, user_addr);
1193                 if (ret)
1194                         goto allocate_init_user_pages_failed;
1195         }
1196
1197         if (offset)
1198                 *offset = amdgpu_bo_mmap_offset(bo);
1199
1200         return 0;
1201
1202 allocate_init_user_pages_failed:
1203         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1204         amdgpu_bo_unref(&bo);
1205         /* Don't unreserve system mem limit twice */
1206         goto err_reserve_limit;
1207 err_bo_create:
1208         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1209 err_reserve_limit:
1210         mutex_destroy(&(*mem)->lock);
1211         kfree(*mem);
1212 err:
1213         if (sg) {
1214                 sg_free_table(sg);
1215                 kfree(sg);
1216         }
1217         return ret;
1218 }
1219
1220 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1221                 struct kgd_dev *kgd, struct kgd_mem *mem)
1222 {
1223         struct amdkfd_process_info *process_info = mem->process_info;
1224         unsigned long bo_size = mem->bo->tbo.mem.size;
1225         struct kfd_bo_va_list *entry, *tmp;
1226         struct bo_vm_reservation_context ctx;
1227         struct ttm_validate_buffer *bo_list_entry;
1228         int ret;
1229
1230         mutex_lock(&mem->lock);
1231
1232         if (mem->mapped_to_gpu_memory > 0) {
1233                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1234                                 mem->va, bo_size);
1235                 mutex_unlock(&mem->lock);
1236                 return -EBUSY;
1237         }
1238
1239         mutex_unlock(&mem->lock);
1240         /* lock is not needed after this, since mem is unused and will
1241          * be freed anyway
1242          */
1243
1244         /* No more MMU notifiers */
1245         amdgpu_mn_unregister(mem->bo);
1246
1247         /* Make sure restore workers don't access the BO any more */
1248         bo_list_entry = &mem->validate_list;
1249         mutex_lock(&process_info->lock);
1250         list_del(&bo_list_entry->head);
1251         mutex_unlock(&process_info->lock);
1252
1253         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1254         if (unlikely(ret))
1255                 return ret;
1256
1257         /* The eviction fence should be removed by the last unmap.
1258          * TODO: Log an error condition if the bo still has the eviction fence
1259          * attached
1260          */
1261         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1262                                         process_info->eviction_fence);
1263         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1264                 mem->va + bo_size * (1 + mem->aql_queue));
1265
1266         /* Remove from VM internal data structures */
1267         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1268                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1269                                 entry, bo_size);
1270
1271         ret = unreserve_bo_and_vms(&ctx, false, false);
1272
1273         /* Free the sync object */
1274         amdgpu_sync_free(&mem->sync);
1275
1276         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1277          * remap BO. We need to free it.
1278          */
1279         if (mem->bo->tbo.sg) {
1280                 sg_free_table(mem->bo->tbo.sg);
1281                 kfree(mem->bo->tbo.sg);
1282         }
1283
1284         /* Free the BO */
1285         amdgpu_bo_unref(&mem->bo);
1286         mutex_destroy(&mem->lock);
1287         kfree(mem);
1288
1289         return ret;
1290 }
1291
1292 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1293                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1294 {
1295         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1296         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1297         int ret;
1298         struct amdgpu_bo *bo;
1299         uint32_t domain;
1300         struct kfd_bo_va_list *entry;
1301         struct bo_vm_reservation_context ctx;
1302         struct kfd_bo_va_list *bo_va_entry = NULL;
1303         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1304         unsigned long bo_size;
1305         bool is_invalid_userptr = false;
1306
1307         bo = mem->bo;
1308         if (!bo) {
1309                 pr_err("Invalid BO when mapping memory to GPU\n");
1310                 return -EINVAL;
1311         }
1312
1313         /* Make sure restore is not running concurrently. Since we
1314          * don't map invalid userptr BOs, we rely on the next restore
1315          * worker to do the mapping
1316          */
1317         mutex_lock(&mem->process_info->lock);
1318
1319         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1320          * sure that the MMU notifier is no longer running
1321          * concurrently and the queues are actually stopped
1322          */
1323         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1324                 down_write(&current->mm->mmap_sem);
1325                 is_invalid_userptr = atomic_read(&mem->invalid);
1326                 up_write(&current->mm->mmap_sem);
1327         }
1328
1329         mutex_lock(&mem->lock);
1330
1331         domain = mem->domain;
1332         bo_size = bo->tbo.mem.size;
1333
1334         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1335                         mem->va,
1336                         mem->va + bo_size * (1 + mem->aql_queue),
1337                         vm, domain_string(domain));
1338
1339         ret = reserve_bo_and_vm(mem, vm, &ctx);
1340         if (unlikely(ret))
1341                 goto out;
1342
1343         /* Userptr can be marked as "not invalid", but not actually be
1344          * validated yet (still in the system domain). In that case
1345          * the queues are still stopped and we can leave mapping for
1346          * the next restore worker
1347          */
1348         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1349             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1350                 is_invalid_userptr = true;
1351
1352         if (check_if_add_bo_to_vm(avm, mem)) {
1353                 ret = add_bo_to_vm(adev, mem, avm, false,
1354                                 &bo_va_entry);
1355                 if (ret)
1356                         goto add_bo_to_vm_failed;
1357                 if (mem->aql_queue) {
1358                         ret = add_bo_to_vm(adev, mem, avm,
1359                                         true, &bo_va_entry_aql);
1360                         if (ret)
1361                                 goto add_bo_to_vm_failed_aql;
1362                 }
1363         } else {
1364                 ret = vm_validate_pt_pd_bos(avm);
1365                 if (unlikely(ret))
1366                         goto add_bo_to_vm_failed;
1367         }
1368
1369         if (mem->mapped_to_gpu_memory == 0 &&
1370             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1371                 /* Validate BO only once. The eviction fence gets added to BO
1372                  * the first time it is mapped. Validate will wait for all
1373                  * background evictions to complete.
1374                  */
1375                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1376                 if (ret) {
1377                         pr_debug("Validate failed\n");
1378                         goto map_bo_to_gpuvm_failed;
1379                 }
1380         }
1381
1382         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1383                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1384                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1385                                         entry->va, entry->va + bo_size,
1386                                         entry);
1387
1388                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1389                                               is_invalid_userptr);
1390                         if (ret) {
1391                                 pr_err("Failed to map bo to gpuvm\n");
1392                                 goto map_bo_to_gpuvm_failed;
1393                         }
1394
1395                         ret = vm_update_pds(vm, ctx.sync);
1396                         if (ret) {
1397                                 pr_err("Failed to update page directories\n");
1398                                 goto map_bo_to_gpuvm_failed;
1399                         }
1400
1401                         entry->is_mapped = true;
1402                         mem->mapped_to_gpu_memory++;
1403                         pr_debug("\t INC mapping count %d\n",
1404                                         mem->mapped_to_gpu_memory);
1405                 }
1406         }
1407
1408         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1409                 amdgpu_bo_fence(bo,
1410                                 &avm->process_info->eviction_fence->base,
1411                                 true);
1412         ret = unreserve_bo_and_vms(&ctx, false, false);
1413
1414         goto out;
1415
1416 map_bo_to_gpuvm_failed:
1417         if (bo_va_entry_aql)
1418                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1419 add_bo_to_vm_failed_aql:
1420         if (bo_va_entry)
1421                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1422 add_bo_to_vm_failed:
1423         unreserve_bo_and_vms(&ctx, false, false);
1424 out:
1425         mutex_unlock(&mem->process_info->lock);
1426         mutex_unlock(&mem->lock);
1427         return ret;
1428 }
1429
1430 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1431                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1432 {
1433         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1434         struct amdkfd_process_info *process_info =
1435                 ((struct amdgpu_vm *)vm)->process_info;
1436         unsigned long bo_size = mem->bo->tbo.mem.size;
1437         struct kfd_bo_va_list *entry;
1438         struct bo_vm_reservation_context ctx;
1439         int ret;
1440
1441         mutex_lock(&mem->lock);
1442
1443         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1444         if (unlikely(ret))
1445                 goto out;
1446         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1447         if (ctx.n_vms == 0) {
1448                 ret = -EINVAL;
1449                 goto unreserve_out;
1450         }
1451
1452         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1453         if (unlikely(ret))
1454                 goto unreserve_out;
1455
1456         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1457                 mem->va,
1458                 mem->va + bo_size * (1 + mem->aql_queue),
1459                 vm);
1460
1461         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1462                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1463                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1464                                         entry->va,
1465                                         entry->va + bo_size,
1466                                         entry);
1467
1468                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1469                         if (ret == 0) {
1470                                 entry->is_mapped = false;
1471                         } else {
1472                                 pr_err("failed to unmap VA 0x%llx\n",
1473                                                 mem->va);
1474                                 goto unreserve_out;
1475                         }
1476
1477                         mem->mapped_to_gpu_memory--;
1478                         pr_debug("\t DEC mapping count %d\n",
1479                                         mem->mapped_to_gpu_memory);
1480                 }
1481         }
1482
1483         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1484          * required.
1485          */
1486         if (mem->mapped_to_gpu_memory == 0 &&
1487             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1488                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1489                                                 process_info->eviction_fence);
1490
1491 unreserve_out:
1492         unreserve_bo_and_vms(&ctx, false, false);
1493 out:
1494         mutex_unlock(&mem->lock);
1495         return ret;
1496 }
1497
1498 int amdgpu_amdkfd_gpuvm_sync_memory(
1499                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1500 {
1501         struct amdgpu_sync sync;
1502         int ret;
1503
1504         amdgpu_sync_create(&sync);
1505
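             /* Snapshot the BO's sync object under the lock and wait on the
              * copy after dropping it, so a potentially long wait does not
              * block other operations on this BO.
              */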
1506         mutex_lock(&mem->lock);
1507         amdgpu_sync_clone(&mem->sync, &sync);
1508         mutex_unlock(&mem->lock);
1509
1510         ret = amdgpu_sync_wait(&sync, intr);
1511         amdgpu_sync_free(&sync);
1512         return ret;
1513 }
1514
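     /*
      * Illustrative sketch (not part of this file): a caller would typically
      * follow a map with a sync before relying on the GPU mapping; "kgd",
      * "mem" and "vm" stand in for the caller's own handles.
      *
      *      int r;
      *
      *      r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
      *      if (!r)
      *              r = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
      */
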
1515 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1516                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1517 {
1518         int ret;
1519         struct amdgpu_bo *bo = mem->bo;
1520
1521         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1522                 pr_err("userptr can't be mapped to kernel\n");
1523                 return -EINVAL;
1524         }
1525
1526         /* Delete kgd_mem from kfd_bo_list to avoid re-validating
1527          * this BO when it is restored after an eviction.
1528          */
1529         mutex_lock(&mem->process_info->lock);
1530
1531         ret = amdgpu_bo_reserve(bo, true);
1532         if (ret) {
1533                 pr_err("Failed to reserve bo. ret %d\n", ret);
1534                 goto bo_reserve_failed;
1535         }
1536
1537         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1538         if (ret) {
1539                 pr_err("Failed to pin bo. ret %d\n", ret);
1540                 goto pin_failed;
1541         }
1542
1543         ret = amdgpu_bo_kmap(bo, kptr);
1544         if (ret) {
1545                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1546                 goto kmap_failed;
1547         }
1548
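             /* The BO is pinned and can no longer be evicted, so the KFD
              * eviction fence is not needed on it any more.
              */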
1549         amdgpu_amdkfd_remove_eviction_fence(
1550                 bo, mem->process_info->eviction_fence);
1551         list_del_init(&mem->validate_list.head);
1552
1553         if (size)
1554                 *size = amdgpu_bo_size(bo);
1555
1556         amdgpu_bo_unreserve(bo);
1557
1558         mutex_unlock(&mem->process_info->lock);
1559         return 0;
1560
1561 kmap_failed:
1562         amdgpu_bo_unpin(bo);
1563 pin_failed:
1564         amdgpu_bo_unreserve(bo);
1565 bo_reserve_failed:
1566         mutex_unlock(&mem->process_info->lock);
1567
1568         return ret;
1569 }
1570
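     /*
      * Illustrative sketch (not part of this file): mapping a GTT BO for
      * kernel CPU access; "kgd" and "mem" stand in for the caller's handles.
      *
      *      void *kptr;
      *      uint64_t size;
      *
      *      if (!amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size))
      *              memset(kptr, 0, size);
      */
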
1571 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1572                                               struct kfd_vm_fault_info *mem)
1573 {
1574         struct amdgpu_device *adev;
1575
1576         adev = (struct amdgpu_device *)kgd;
1577         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1578                 *mem = *adev->gmc.vm_fault_info;
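                     /* Make sure the fault info has been copied out before
                      * the updated flag is cleared and a new fault can be
                      * recorded in its place.
                      */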
1579                 mb();
1580                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1581         }
1582         return 0;
1583 }
1584
1585 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1586                                       struct dma_buf *dma_buf,
1587                                       uint64_t va, void *vm,
1588                                       struct kgd_mem **mem, uint64_t *size,
1589                                       uint64_t *mmap_offset)
1590 {
1591         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1592         struct drm_gem_object *obj;
1593         struct amdgpu_bo *bo;
1594         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1595
1596         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1597                 /* Can't handle non-graphics buffers */
1598                 return -EINVAL;
1599
1600         obj = dma_buf->priv;
1601         if (obj->dev->dev_private != adev)
1602                 /* Can't handle buffers from other devices */
1603                 return -EINVAL;
1604
1605         bo = gem_to_amdgpu_bo(obj);
1606         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1607                                     AMDGPU_GEM_DOMAIN_GTT)))
1608                 /* Only VRAM and GTT BOs are supported */
1609                 return -EINVAL;
1610
1611         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1612         if (!*mem)
1613                 return -ENOMEM;
1614
1615         if (size)
1616                 *size = amdgpu_bo_size(bo);
1617
1618         if (mmap_offset)
1619                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1620
1621         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1622         mutex_init(&(*mem)->lock);
1623         (*mem)->mapping_flags =
1624                 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1625                 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1626
1627         (*mem)->bo = amdgpu_bo_ref(bo);
1628         (*mem)->va = va;
1629         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1630                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1631         (*mem)->mapped_to_gpu_memory = 0;
1632         (*mem)->process_info = avm->process_info;
1633         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1634         amdgpu_sync_create(&(*mem)->sync);
1635
1636         return 0;
1637 }
1638
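     /*
      * Illustrative sketch (not part of this file): an ioctl handler could
      * import a dma-buf fd roughly like this; "kgd", "fd", "va" and "vm" are
      * the caller's values and error handling is trimmed.
      *
      *      struct dma_buf *dmabuf = dma_buf_get(fd);
      *      struct kgd_mem *mem;
      *      uint64_t size, mmap_offset;
      *      int r;
      *
      *      if (IS_ERR(dmabuf))
      *              return PTR_ERR(dmabuf);
      *      r = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, vm,
      *                                            &mem, &size, &mmap_offset);
      *      dma_buf_put(dmabuf);
      *      return r;
      */
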
1639 /* Evict a userptr BO by stopping the queues if necessary
1640  *
1641  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1642  * cannot do any memory allocations, and cannot take any locks that
1643  * are held elsewhere while allocating memory. Therefore this is as
1644  * simple as possible, using atomic counters.
1645  *
1646  * It doesn't do anything to the BO itself. The real work happens in
1647  * restore, where we get updated page addresses. This function only
1648  * ensures that GPU access to the BO is stopped.
1649  */
1650 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1651                                 struct mm_struct *mm)
1652 {
1653         struct amdkfd_process_info *process_info = mem->process_info;
1654         int invalid, evicted_bos;
1655         int r = 0;
1656
1657         invalid = atomic_inc_return(&mem->invalid);
1658         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1659         if (evicted_bos == 1) {
1660                 /* First eviction, stop the queues */
1661                 r = kgd2kfd_quiesce_mm(mm);
1662                 if (r)
1663                         pr_err("Failed to quiesce KFD\n");
1664                 schedule_delayed_work(&process_info->restore_userptr_work,
1665                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1666         }
1667
1668         return r;
1669 }
1670
1671 /* Update invalid userptr BOs
1672  *
1673  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1674  * userptr_inval_list and updates user pages for all BOs that have
1675  * been invalidated since their last update.
1676  */
1677 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1678                                      struct mm_struct *mm)
1679 {
1680         struct kgd_mem *mem, *tmp_mem;
1681         struct amdgpu_bo *bo;
1682         struct ttm_operation_ctx ctx = { false, false };
1683         int invalid, ret;
1684
1685         /* Move all invalidated BOs to the userptr_inval_list and
1686          * release their user pages by migration to the CPU domain
1687          */
1688         list_for_each_entry_safe(mem, tmp_mem,
1689                                  &process_info->userptr_valid_list,
1690                                  validate_list.head) {
1691                 if (!atomic_read(&mem->invalid))
1692                         continue; /* BO is still valid */
1693
1694                 bo = mem->bo;
1695
1696                 if (amdgpu_bo_reserve(bo, true))
1697                         return -EAGAIN;
1698                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1699                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1700                 amdgpu_bo_unreserve(bo);
1701                 if (ret) {
1702                         pr_err("%s: Failed to invalidate userptr BO\n",
1703                                __func__);
1704                         return -EAGAIN;
1705                 }
1706
1707                 list_move_tail(&mem->validate_list.head,
1708                                &process_info->userptr_inval_list);
1709         }
1710
1711         if (list_empty(&process_info->userptr_inval_list))
1712                 return 0; /* All evicted userptr BOs were freed */
1713
1714         /* Go through userptr_inval_list and update any invalid user_pages */
1715         list_for_each_entry(mem, &process_info->userptr_inval_list,
1716                             validate_list.head) {
1717                 invalid = atomic_read(&mem->invalid);
1718                 if (!invalid)
1719                         /* BO hasn't been invalidated since the last
1720                          * revalidation attempt. Keep its BO list.
1721                          */
1722                         continue;
1723
1724                 bo = mem->bo;
1725
1726                 /* Get updated user pages */
1727                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1728                 if (ret) {
1729                         pr_debug("%s: Failed to get user pages: %d\n",
1730                                 __func__, ret);
1731
1732                         /* Return the error (-EBUSY or -ENOMEM); restore will be retried */
1733                         return ret;
1734                 }
1735
1736                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1737
1738                 /* Mark the BO as valid unless it was invalidated
1739                  * again concurrently.
1740                  */
1741                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1742                         return -EAGAIN;
1743         }
1744
1745         return 0;
1746 }
1747
1748 /* Validate invalid userptr BOs
1749  *
1750  * Validates BOs on the userptr_inval_list, and moves them back to the
1751  * userptr_valid_list. Also updates GPUVM page tables with new page
1752  * addresses and waits for the page table updates to complete.
1753  */
1754 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1755 {
1756         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1757         struct list_head resv_list, duplicates;
1758         struct ww_acquire_ctx ticket;
1759         struct amdgpu_sync sync;
1760
1761         struct amdgpu_vm *peer_vm;
1762         struct kgd_mem *mem, *tmp_mem;
1763         struct amdgpu_bo *bo;
1764         struct ttm_operation_ctx ctx = { false, false };
1765         int i, ret;
1766
1767         pd_bo_list_entries = kcalloc(process_info->n_vms,
1768                                      sizeof(struct amdgpu_bo_list_entry),
1769                                      GFP_KERNEL);
1770         if (!pd_bo_list_entries) {
1771                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1772                 ret = -ENOMEM;
1773                 goto out_no_mem;
1774         }
1775
1776         INIT_LIST_HEAD(&resv_list);
1777         INIT_LIST_HEAD(&duplicates);
1778
1779         /* Get all the page directory BOs that need to be reserved */
1780         i = 0;
1781         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1782                             vm_list_node)
1783                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1784                                     &pd_bo_list_entries[i++]);
1785         /* Add the userptr_inval_list entries to resv_list */
1786         list_for_each_entry(mem, &process_info->userptr_inval_list,
1787                             validate_list.head) {
1788                 list_add_tail(&mem->resv_list.head, &resv_list);
1789                 mem->resv_list.bo = mem->validate_list.bo;
1790                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1791         }
1792
1793         /* Reserve all BOs and page tables for validation */
1794         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
1795                                      true);
1796         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1797         if (ret)
1798                 goto out_free;
1799
1800         amdgpu_sync_create(&sync);
1801
1802         ret = process_validate_vms(process_info);
1803         if (ret)
1804                 goto unreserve_out;
1805
1806         /* Validate BOs and update GPUVM page tables */
1807         list_for_each_entry_safe(mem, tmp_mem,
1808                                  &process_info->userptr_inval_list,
1809                                  validate_list.head) {
1810                 struct kfd_bo_va_list *bo_va_entry;
1811
1812                 bo = mem->bo;
1813
1814                 /* Validate the BO if we got user pages */
1815                 if (bo->tbo.ttm->pages[0]) {
1816                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1817                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1818                         if (ret) {
1819                                 pr_err("%s: failed to validate BO\n", __func__);
1820                                 goto unreserve_out;
1821                         }
1822                 }
1823
1824                 list_move_tail(&mem->validate_list.head,
1825                                &process_info->userptr_valid_list);
1826
1827                 /* Update mapping. If the BO was not validated
1828                  * (because we couldn't get user pages), this will
1829                  * clear the page table entries, which will result in
1830                  * VM faults if the GPU tries to access the invalid
1831                  * memory.
1832                  */
1833                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1834                         if (!bo_va_entry->is_mapped)
1835                                 continue;
1836
1837                         ret = update_gpuvm_pte((struct amdgpu_device *)
1838                                                bo_va_entry->kgd_dev,
1839                                                bo_va_entry, &sync);
1840                         if (ret) {
1841                                 pr_err("%s: update PTE failed\n", __func__);
1842                                 /* make sure this gets validated again */
1843                                 atomic_inc(&mem->invalid);
1844                                 goto unreserve_out;
1845                         }
1846                 }
1847         }
1848
1849         /* Update page directories */
1850         ret = process_update_pds(process_info, &sync);
1851
1852 unreserve_out:
1853         ttm_eu_backoff_reservation(&ticket, &resv_list);
1854         amdgpu_sync_wait(&sync, false);
1855         amdgpu_sync_free(&sync);
1856 out_free:
1857         kfree(pd_bo_list_entries);
1858 out_no_mem:
1859
1860         return ret;
1861 }
1862
1863 /* Worker callback to restore evicted userptr BOs
1864  *
1865  * Tries to update and validate all userptr BOs. If successful and no
1866  * concurrent evictions happened, the queues are restarted. Otherwise,
1867  * reschedule for another attempt later.
1868  */
1869 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1870 {
1871         struct delayed_work *dwork = to_delayed_work(work);
1872         struct amdkfd_process_info *process_info =
1873                 container_of(dwork, struct amdkfd_process_info,
1874                              restore_userptr_work);
1875         struct task_struct *usertask;
1876         struct mm_struct *mm;
1877         int evicted_bos;
1878
1879         evicted_bos = atomic_read(&process_info->evicted_bos);
1880         if (!evicted_bos)
1881                 return;
1882
1883         /* Reference task and mm in case of concurrent process termination */
1884         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1885         if (!usertask)
1886                 return;
1887         mm = get_task_mm(usertask);
1888         if (!mm) {
1889                 put_task_struct(usertask);
1890                 return;
1891         }
1892
1893         mutex_lock(&process_info->lock);
1894
1895         if (update_invalid_user_pages(process_info, mm))
1896                 goto unlock_out;
1897         /* userptr_inval_list can be empty if all evicted userptr BOs
1898          * have been freed. In that case there is nothing to validate
1899          * and we can just restart the queues.
1900          */
1901         if (!list_empty(&process_info->userptr_inval_list)) {
1902                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1903                         goto unlock_out; /* Concurrent eviction, try again */
1904
1905                 if (validate_invalid_user_pages(process_info))
1906                         goto unlock_out;
1907         }
1908         /* Final check for concurrent eviction and atomic update. If
1909          * another eviction happens after successful update, it will
1910          * be a first eviction that calls quiesce_mm. The eviction
1911          * reference counting inside KFD will handle this case.
1912          */
1913         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1914             evicted_bos)
1915                 goto unlock_out;
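             /* Everything has been restored and no new eviction has raced
              * with us; clear the local count so no retry is scheduled below.
              */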
1916         evicted_bos = 0;
1917         if (kgd2kfd_resume_mm(mm)) {
1918                 pr_err("%s: Failed to resume KFD\n", __func__);
1919                 /* No recovery from this failure. Probably the CP is
1920                  * hanging. No point trying again.
1921                  */
1922         }
1923
1924 unlock_out:
1925         mutex_unlock(&process_info->lock);
1926         mmput(mm);
1927         put_task_struct(usertask);
1928
1929         /* If validation failed, reschedule another attempt */
1930         if (evicted_bos)
1931                 schedule_delayed_work(&process_info->restore_userptr_work,
1932                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1933 }
1934
1935 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1936  *   KFD process identified by process_info
1937  *
1938  * @process_info: amdkfd_process_info of the KFD process
1939  *
1940  * After memory eviction, the restore thread calls this function. It must be
1941  * called while the process is still valid. BO restore involves:
1942  *
1943  * 1.  Release the old eviction fence and create a new one
1944  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
1945  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1946  *     BOs that need to be reserved.
1947  * 4.  Reserve all the BOs
1948  * 5.  Validate the PD and PT BOs.
1949  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
1950  * 7.  Add the fence to all PD and PT BOs.
1951  * 8.  Unreserve all BOs
1952  */
1953 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1954 {
1955         struct amdgpu_bo_list_entry *pd_bo_list;
1956         struct amdkfd_process_info *process_info = info;
1957         struct amdgpu_vm *peer_vm;
1958         struct kgd_mem *mem;
1959         struct bo_vm_reservation_context ctx;
1960         struct amdgpu_amdkfd_fence *new_fence;
1961         int ret = 0, i;
1962         struct list_head duplicate_save;
1963         struct amdgpu_sync sync_obj;
1964
1965         INIT_LIST_HEAD(&duplicate_save);
1966         INIT_LIST_HEAD(&ctx.list);
1967         INIT_LIST_HEAD(&ctx.duplicates);
1968
1969         pd_bo_list = kcalloc(process_info->n_vms,
1970                              sizeof(struct amdgpu_bo_list_entry),
1971                              GFP_KERNEL);
1972         if (!pd_bo_list)
1973                 return -ENOMEM;
1974
1975         i = 0;
1976         mutex_lock(&process_info->lock);
1977         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1978                         vm_list_node)
1979                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
1980
1981         /* Reserve all BOs and page tables/directory. Add all BOs from
1982          * kfd_bo_list to ctx.list
1983          */
1984         list_for_each_entry(mem, &process_info->kfd_bo_list,
1985                             validate_list.head) {
1986
1987                 list_add_tail(&mem->resv_list.head, &ctx.list);
1988                 mem->resv_list.bo = mem->validate_list.bo;
1989                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1990         }
1991
1992         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
1993                                      false, &duplicate_save, true);
1994         if (ret) {
1995                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
1996                 goto ttm_reserve_fail;
1997         }
1998
1999         amdgpu_sync_create(&sync_obj);
2000
2001         /* Validate PDs and PTs */
2002         ret = process_validate_vms(process_info);
2003         if (ret)
2004                 goto validate_map_fail;
2005
2006         ret = process_sync_pds_resv(process_info, &sync_obj);
2007         if (ret) {
2008                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2009                 goto validate_map_fail;
2010         }
2011
2012         /* Validate BOs and map them to GPUVM (update VM page tables). */
2013         list_for_each_entry(mem, &process_info->kfd_bo_list,
2014                             validate_list.head) {
2015
2016                 struct amdgpu_bo *bo = mem->bo;
2017                 uint32_t domain = mem->domain;
2018                 struct kfd_bo_va_list *bo_va_entry;
2019
2020                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2021                 if (ret) {
2022                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2023                         goto validate_map_fail;
2024                 }
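                     /* Collect the BO's move fence so the later sync wait
                      * also covers any pending buffer move.
                      */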
2025                 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2026                 if (ret) {
2027                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2028                         goto validate_map_fail;
2029                 }
2030                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2031                                     bo_list) {
2032                         ret = update_gpuvm_pte((struct amdgpu_device *)
2033                                               bo_va_entry->kgd_dev,
2034                                               bo_va_entry,
2035                                               &sync_obj);
2036                         if (ret) {
2037                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2038                                 goto validate_map_fail;
2039                         }
2040                 }
2041         }
2042
2043         /* Update page directories */
2044         ret = process_update_pds(process_info, &sync_obj);
2045         if (ret) {
2046                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2047                 goto validate_map_fail;
2048         }
2049
2050         /* Wait for validate and PT updates to finish */
2051         amdgpu_sync_wait(&sync_obj, false);
2052
2053         /* Release the old eviction fence and create a new one: a fence
2054          * only goes from unsignaled to signaled and cannot be reused.
2055          * Use the context and mm from the old fence.
2056          */
2057         new_fence = amdgpu_amdkfd_fence_create(
2058                                 process_info->eviction_fence->base.context,
2059                                 process_info->eviction_fence->mm);
2060         if (!new_fence) {
2061                 pr_err("Failed to create eviction fence\n");
2062                 ret = -ENOMEM;
2063                 goto validate_map_fail;
2064         }
2065         dma_fence_put(&process_info->eviction_fence->base);
2066         process_info->eviction_fence = new_fence;
2067         *ef = dma_fence_get(&new_fence->base);
2068
2069         /* Attach new eviction fence to all BOs */
2070         list_for_each_entry(mem, &process_info->kfd_bo_list,
2071                 validate_list.head)
2072                 amdgpu_bo_fence(mem->bo,
2073                         &process_info->eviction_fence->base, true);
2074
2075         /* Attach eviction fence to PD / PT BOs */
2076         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2077                             vm_list_node) {
2078                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2079
2080                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2081         }
2082
2083 validate_map_fail:
2084         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2085         amdgpu_sync_free(&sync_obj);
2086 ttm_reserve_fail:
2087         mutex_unlock(&process_info->lock);
2088         kfree(pd_bo_list);
2089         return ret;
2090 }
2091
2092 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2093 {
2094         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2095         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2096         int ret;
2097
2098         if (!info || !gws)
2099                 return -EINVAL;
2100
2101         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2102         if (!*mem)
2103                 return -ENOMEM;
2104
2105         mutex_init(&(*mem)->lock);
2106         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2107         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2108         (*mem)->process_info = process_info;
2109         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2110         amdgpu_sync_create(&(*mem)->sync);
2111
2112
2113         /* Validate the GWS BO the first time it is added to the process */
2114         mutex_lock(&(*mem)->process_info->lock);
2115         ret = amdgpu_bo_reserve(gws_bo, false);
2116         if (unlikely(ret)) {
2117                 pr_err("Reserve gws bo failed %d\n", ret);
2118                 goto bo_reservation_failure;
2119         }
2120
2121         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2122         if (ret) {
2123                 pr_err("GWS BO validate failed %d\n", ret);
2124                 goto bo_validation_failure;
2125         }
2126         /* The GWS resource is shared between amdgpu and amdkfd.
2127          * Add the process eviction fence to the BO so that the
2128          * two users can evict each other.
2129          */
2130         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2131         if (ret)
2132                 goto reserve_shared_fail;
2133         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2134         amdgpu_bo_unreserve(gws_bo);
2135         mutex_unlock(&(*mem)->process_info->lock);
2136
2137         return ret;
2138
2139 reserve_shared_fail:
2140 bo_validation_failure:
2141         amdgpu_bo_unreserve(gws_bo);
2142 bo_reservation_failure:
2143         mutex_unlock(&(*mem)->process_info->lock);
2144         amdgpu_sync_free(&(*mem)->sync);
2145         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2146         amdgpu_bo_unref(&gws_bo);
2147         mutex_destroy(&(*mem)->lock);
2148         kfree(*mem);
2149         *mem = NULL;
2150         return ret;
2151 }
2152
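     /*
      * Illustrative sketch (not part of this file): the two GWS helpers are
      * used as a pair; "pinfo" and "gws_bo" stand in for the caller's process
      * info and the device GWS buffer object.
      *
      *      struct kgd_mem *gws_mem;
      *
      *      if (!amdgpu_amdkfd_add_gws_to_process(pinfo, gws_bo, &gws_mem))
      *              amdgpu_amdkfd_remove_gws_from_process(pinfo, gws_mem);
      */
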
2153 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2154 {
2155         int ret;
2156         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2157         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2158         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2159
2160         /* Remove BO from process's validate list so restore worker won't touch
2161          * it anymore
2162          */
2163         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2164
2165         ret = amdgpu_bo_reserve(gws_bo, false);
2166         if (unlikely(ret)) {
2167                 pr_err("Reserve gws bo failed %d\n", ret);
2168                 /* TODO: add the BO back to the validate_list? */
2169                 return ret;
2170         }
2171         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2172                         process_info->eviction_fence);
2173         amdgpu_bo_unreserve(gws_bo);
2174         amdgpu_sync_free(&kgd_mem->sync);
2175         amdgpu_bo_unref(&gws_bo);
2176         mutex_destroy(&kgd_mem->lock);
2177         kfree(mem);
2178         return 0;
2179 }