1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/dma-buf.h>
26 #include <linux/list.h>
27 #include <linux/pagemap.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/task.h>
30
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_amdkfd.h"
34 #include "amdgpu_dma_buf.h"
35
36 /* Special VM and GART address alignment needed for VI pre-Fiji due to
37  * a HW bug.
38  */
39 #define VI_BO_SIZE_ALIGN (0x8000)
40
41 /* BO flag to indicate a KFD userptr BO */
42 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
43
44 /* Userptr restore delay, just long enough to allow consecutive VM
45  * changes to accumulate
46  */
47 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
48
49 /* Impose limit on how much memory KFD can use */
50 static struct {
51         uint64_t max_system_mem_limit;
52         uint64_t max_ttm_mem_limit;
53         int64_t system_mem_used;
54         int64_t ttm_mem_used;
55         spinlock_t mem_limit_lock;
56 } kfd_mem_limit;
57
58 /* Struct used for amdgpu_amdkfd_bo_validate */
59 struct amdgpu_vm_parser {
60         uint32_t        domain;
61         bool            wait;
62 };
63
64 static const char * const domain_bit_to_string[] = {
65                 "CPU",
66                 "GTT",
67                 "VRAM",
68                 "GDS",
69                 "GWS",
70                 "OA"
71 };
72
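/* ffs() gives the 1-based index of the lowest set bit, so a single-bit
 * domain mask indexes domain_bit_to_string directly, e.g.
 * AMDGPU_GEM_DOMAIN_VRAM (0x4) -> ffs() == 3 -> "VRAM".
 */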
73 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
74
75 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
76
77
78 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
79 {
80         return (struct amdgpu_device *)kgd;
81 }
82
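/* Returns true if the BO has no bo_va in this VM yet, i.e. it still needs
 * to be added with add_bo_to_vm().
 */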
83 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
84                 struct kgd_mem *mem)
85 {
86         struct kfd_bo_va_list *entry;
87
88         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
89                 if (entry->bo_va->base.vm == avm)
90                         return false;
91
92         return true;
93 }
94
95 /* Set memory usage limits. Currently, the limits are:
96  *  System (TTM + userptr) memory - 3/4th System RAM
97  *  TTM memory - 3/8th System RAM
98  */
99 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
100 {
101         struct sysinfo si;
102         uint64_t mem;
103
104         si_meminfo(&si);
105         mem = si.totalram - si.totalhigh;
106         mem *= si.mem_unit;
107
108         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
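        /* (mem >> 1) + (mem >> 2) = 1/2 + 1/4 = 3/4 of system memory,
         * (mem >> 1) - (mem >> 3) = 1/2 - 1/8 = 3/8, matching the limits
         * documented above.
         */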
109         kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
110         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
111         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
112                 (kfd_mem_limit.max_system_mem_limit >> 20),
113                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
114 }
115
116 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
117                 uint64_t size, u32 domain, bool sg)
118 {
119         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
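        /* Reserve 1/512 (about 0.2%) of the total memory size for page
         * tables.
         */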
120         uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
121         int ret = 0;
122
123         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
124                                        sizeof(struct amdgpu_bo));
125
126         vram_needed = 0;
127         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
128                 /* TTM GTT memory */
129                 system_mem_needed = acc_size + size;
130                 ttm_mem_needed = acc_size + size;
131         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
132                 /* Userptr */
133                 system_mem_needed = acc_size + size;
134                 ttm_mem_needed = acc_size;
135         } else {
136                 /* VRAM and SG */
137                 system_mem_needed = acc_size;
138                 ttm_mem_needed = acc_size;
139                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
140                         vram_needed = size;
141         }
142
143         spin_lock(&kfd_mem_limit.mem_limit_lock);
144
145         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
146              kfd_mem_limit.max_system_mem_limit) ||
147             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
148              kfd_mem_limit.max_ttm_mem_limit) ||
149             (adev->kfd.vram_used + vram_needed >
150              adev->gmc.real_vram_size - reserved_for_pt)) {
151                 ret = -ENOMEM;
152         } else {
153                 kfd_mem_limit.system_mem_used += system_mem_needed;
154                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
155                 adev->kfd.vram_used += vram_needed;
156         }
157
158         spin_unlock(&kfd_mem_limit.mem_limit_lock);
159         return ret;
160 }
161
162 static void unreserve_mem_limit(struct amdgpu_device *adev,
163                 uint64_t size, u32 domain, bool sg)
164 {
165         size_t acc_size;
166
167         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
168                                        sizeof(struct amdgpu_bo));
169
170         spin_lock(&kfd_mem_limit.mem_limit_lock);
171         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
172                 kfd_mem_limit.system_mem_used -= (acc_size + size);
173                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
174         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
175                 kfd_mem_limit.system_mem_used -= (acc_size + size);
176                 kfd_mem_limit.ttm_mem_used -= acc_size;
177         } else {
178                 kfd_mem_limit.system_mem_used -= acc_size;
179                 kfd_mem_limit.ttm_mem_used -= acc_size;
180                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
181                         adev->kfd.vram_used -= size;
182                         WARN_ONCE(adev->kfd.vram_used < 0,
183                                   "kfd VRAM memory accounting unbalanced");
184                 }
185         }
186         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
187                   "kfd system memory accounting unbalanced");
188         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
189                   "kfd TTM memory accounting unbalanced");
190
191         spin_unlock(&kfd_mem_limit.mem_limit_lock);
192 }
193
194 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
195 {
196         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
197         u32 domain = bo->preferred_domains;
198         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
199
200         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
201                 domain = AMDGPU_GEM_DOMAIN_CPU;
202                 sg = false;
203         }
204
205         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
206 }
207
208
209 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
210  *  reservation object.
211  *
212  * @bo: [IN] Remove eviction fence(s) from this BO
213  * @ef: [IN] This eviction fence is removed if it
214  *  is present in the shared list.
215  *
216  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv held.
217  */
218 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
219                                         struct amdgpu_amdkfd_fence *ef)
220 {
221         struct dma_resv *resv = bo->tbo.base.resv;
222         struct dma_resv_list *old, *new;
223         unsigned int i, j, k;
224
225         if (!ef)
226                 return -EINVAL;
227
228         old = dma_resv_get_list(resv);
229         if (!old)
230                 return 0;
231
232         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
233                       GFP_KERNEL);
234         if (!new)
235                 return -ENOMEM;
236
237         /* Go through all the shared fences in the reservation object and sort
238          * the interesting ones to the end of the list.
239          */
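        /* Fences belonging to the eviction fence's context are gathered at
         * the tail of the new list via j (to be dropped below); all other
         * fences are packed at the head via k and kept.
         */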
240         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
241                 struct dma_fence *f;
242
243                 f = rcu_dereference_protected(old->shared[i],
244                                               dma_resv_held(resv));
245
246                 if (f->context == ef->base.context)
247                         RCU_INIT_POINTER(new->shared[--j], f);
248                 else
249                         RCU_INIT_POINTER(new->shared[k++], f);
250         }
251         new->shared_max = old->shared_max;
252         new->shared_count = k;
253
254         /* Install the new fence list, seqcount provides the barriers */
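        /* Readers that don't hold the reservation lock sample resv->seq
         * before and after dereferencing the fence list and retry if it
         * changed, so swapping the pointer inside the write section is safe.
         */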
255         preempt_disable();
256         write_seqcount_begin(&resv->seq);
257         RCU_INIT_POINTER(resv->fence, new);
258         write_seqcount_end(&resv->seq);
259         preempt_enable();
260
261         /* Drop the references to the removed fences */
262         for (i = j, k = 0; i < old->shared_count; ++i) {
263                 struct dma_fence *f;
264
265                 f = rcu_dereference_protected(new->shared[i],
266                                               dma_resv_held(resv));
267                 dma_fence_put(f);
268         }
269         kfree_rcu(old, rcu);
270
271         return 0;
272 }
273
274 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
275                                      bool wait)
276 {
277         struct ttm_operation_ctx ctx = { false, false };
278         int ret;
279
280         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
281                  "Called with userptr BO"))
282                 return -EINVAL;
283
284         amdgpu_bo_placement_from_domain(bo, domain);
285
286         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
287         if (ret)
288                 goto validate_fail;
289         if (wait)
290                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
291
292 validate_fail:
293         return ret;
294 }
295
296 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
297 {
298         struct amdgpu_vm_parser *p = param;
299
300         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
301 }
302
303 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
304  *
305  * Page directories are not updated here because huge page handling
306  * during page table updates can invalidate page directory entries
307  * again. Page directories are only updated after updating page
308  * tables.
309  */
310 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
311 {
312         struct amdgpu_bo *pd = vm->root.base.bo;
313         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
314         struct amdgpu_vm_parser param;
315         int ret;
316
317         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
318         param.wait = false;
319
320         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
321                                         &param);
322         if (ret) {
323                 pr_err("amdgpu: failed to validate PT BOs\n");
324                 return ret;
325         }
326
327         ret = amdgpu_amdkfd_validate(&param, pd);
328         if (ret) {
329                 pr_err("amdgpu: failed to validate PD\n");
330                 return ret;
331         }
332
333         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
334
335         if (vm->use_cpu_for_update) {
336                 ret = amdgpu_bo_kmap(pd, NULL);
337                 if (ret) {
338                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
339                         return ret;
340                 }
341         }
342
343         return 0;
344 }
345
346 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
347 {
348         struct amdgpu_bo *pd = vm->root.base.bo;
349         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
350         int ret;
351
352         ret = amdgpu_vm_update_directories(adev, vm);
353         if (ret)
354                 return ret;
355
356         return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
357 }
358
359 /* add_bo_to_vm - Add a BO to a VM
360  *
361  * Everything that needs to be done only once when a BO is first added
362  * to a VM. It can later be mapped and unmapped many times without
363  * repeating these steps.
364  *
365  * 1. Allocate and initialize BO VA entry data structure
366  * 2. Add BO to the VM
367  * 3. Determine ASIC-specific PTE flags
368  * 4. Alloc page tables and directories if needed
369  * 4a.  Validate new page tables and directories
370  */
371 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
372                 struct amdgpu_vm *vm, bool is_aql,
373                 struct kfd_bo_va_list **p_bo_va_entry)
374 {
375         int ret;
376         struct kfd_bo_va_list *bo_va_entry;
377         struct amdgpu_bo *bo = mem->bo;
378         uint64_t va = mem->va;
379         struct list_head *list_bo_va = &mem->bo_va_list;
380         unsigned long bo_size = bo->tbo.mem.size;
381
382         if (!va) {
383                 pr_err("Invalid VA when adding BO to VM\n");
384                 return -EINVAL;
385         }
386
387         if (is_aql)
388                 va += bo_size;
389
390         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
391         if (!bo_va_entry)
392                 return -ENOMEM;
393
394         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
395                         va + bo_size, vm);
396
397         /* Add BO to VM internal data structures */
398         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
399         if (!bo_va_entry->bo_va) {
400                 ret = -EINVAL;
401                 pr_err("Failed to add BO object to VM. ret == %d\n",
402                                 ret);
403                 goto err_vmadd;
404         }
405
406         bo_va_entry->va = va;
407         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
408                                                          mem->mapping_flags);
409         bo_va_entry->kgd_dev = (void *)adev;
410         list_add(&bo_va_entry->bo_list, list_bo_va);
411
412         if (p_bo_va_entry)
413                 *p_bo_va_entry = bo_va_entry;
414
415         /* Allocate and validate page tables if needed */
416         ret = vm_validate_pt_pd_bos(vm);
417         if (ret) {
418                 pr_err("validate_pt_pd_bos() failed\n");
419                 goto err_alloc_pts;
420         }
421
422         return 0;
423
424 err_alloc_pts:
425         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
426         list_del(&bo_va_entry->bo_list);
427 err_vmadd:
428         kfree(bo_va_entry);
429         return ret;
430 }
431
432 static void remove_bo_from_vm(struct amdgpu_device *adev,
433                 struct kfd_bo_va_list *entry, unsigned long size)
434 {
435         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
436                         entry->va,
437                         entry->va + size, entry);
438         amdgpu_vm_bo_rmv(adev, entry->bo_va);
439         list_del(&entry->bo_list);
440         kfree(entry);
441 }
442
443 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
444                                 struct amdkfd_process_info *process_info,
445                                 bool userptr)
446 {
447         struct ttm_validate_buffer *entry = &mem->validate_list;
448         struct amdgpu_bo *bo = mem->bo;
449
450         INIT_LIST_HEAD(&entry->head);
451         entry->num_shared = 1;
452         entry->bo = &bo->tbo;
453         mutex_lock(&process_info->lock);
454         if (userptr)
455                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
456         else
457                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
458         mutex_unlock(&process_info->lock);
459 }
460
461 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
462                 struct amdkfd_process_info *process_info)
463 {
464         struct ttm_validate_buffer *bo_list_entry;
465
466         bo_list_entry = &mem->validate_list;
467         mutex_lock(&process_info->lock);
468         list_del(&bo_list_entry->head);
469         mutex_unlock(&process_info->lock);
470 }
471
472 /* Initializes user pages. It registers the MMU notifier and validates
473  * the userptr BO in the GTT domain.
474  *
475  * The BO must already be on the userptr_valid_list. Otherwise an
476  * eviction and restore may happen that leaves the new BO unmapped
477  * with the user mode queues running.
478  *
479  * Takes the process_info->lock to protect against concurrent restore
480  * workers.
481  *
482  * Returns 0 for success, negative errno for errors.
483  */
484 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
485                            uint64_t user_addr)
486 {
487         struct amdkfd_process_info *process_info = mem->process_info;
488         struct amdgpu_bo *bo = mem->bo;
489         struct ttm_operation_ctx ctx = { true, false };
490         int ret = 0;
491
492         mutex_lock(&process_info->lock);
493
494         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
495         if (ret) {
496                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
497                 goto out;
498         }
499
500         ret = amdgpu_mn_register(bo, user_addr);
501         if (ret) {
502                 pr_err("%s: Failed to register MMU notifier: %d\n",
503                        __func__, ret);
504                 goto out;
505         }
506
507         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
508         if (ret) {
509                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
510                 goto unregister_out;
511         }
512
513         ret = amdgpu_bo_reserve(bo, true);
514         if (ret) {
515                 pr_err("%s: Failed to reserve BO\n", __func__);
516                 goto release_out;
517         }
518         amdgpu_bo_placement_from_domain(bo, mem->domain);
519         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
520         if (ret)
521                 pr_err("%s: failed to validate BO\n", __func__);
522         amdgpu_bo_unreserve(bo);
523
524 release_out:
525         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
526 unregister_out:
527         if (ret)
528                 amdgpu_mn_unregister(bo);
529 out:
530         mutex_unlock(&process_info->lock);
531         return ret;
532 }
533
534 /* Reserving a BO and its page table BOs must happen atomically to
535  * avoid deadlocks. Some operations update multiple VMs at once. Track
536  * all the reservation info in a context structure. Optionally a sync
537  * object can track VM updates.
538  */
539 struct bo_vm_reservation_context {
540         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
541         unsigned int n_vms;                 /* Number of VMs reserved       */
542         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
543         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
544         struct list_head list, duplicates;  /* BO lists                     */
545         struct amdgpu_sync *sync;           /* Pointer to sync object       */
546         bool reserved;                      /* Whether BOs are reserved     */
547 };
548
549 enum bo_vm_match {
550         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
551         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
552         BO_VM_ALL,              /* Match all VMs a BO was added to    */
553 };
554
555 /**
556  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
557  * @mem: KFD BO structure.
558  * @vm: the VM to reserve.
559  * @ctx: the struct that will be used in unreserve_bo_and_vms().
560  */
561 static int reserve_bo_and_vm(struct kgd_mem *mem,
562                               struct amdgpu_vm *vm,
563                               struct bo_vm_reservation_context *ctx)
564 {
565         struct amdgpu_bo *bo = mem->bo;
566         int ret;
567
568         WARN_ON(!vm);
569
570         ctx->reserved = false;
571         ctx->n_vms = 1;
572         ctx->sync = &mem->sync;
573
574         INIT_LIST_HEAD(&ctx->list);
575         INIT_LIST_HEAD(&ctx->duplicates);
576
577         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
578         if (!ctx->vm_pd)
579                 return -ENOMEM;
580
581         ctx->kfd_bo.priority = 0;
582         ctx->kfd_bo.tv.bo = &bo->tbo;
583         ctx->kfd_bo.tv.num_shared = 1;
584         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
585
586         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
587
588         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
589                                      false, &ctx->duplicates, true);
590         if (!ret)
591                 ctx->reserved = true;
592         else {
593                 pr_err("Failed to reserve buffers in ttm\n");
594                 kfree(ctx->vm_pd);
595                 ctx->vm_pd = NULL;
596         }
597
598         return ret;
599 }
600
601 /**
602  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
603  * @mem: KFD BO structure.
604  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
605  * are reserved. Otherwise, only the given VM is reserved.
606  * @map_type: the mapping status that will be used to filter the VMs.
607  * @ctx: the struct that will be used in unreserve_bo_and_vms().
608  *
609  * Returns 0 for success, negative for failure.
610  */
611 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
612                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
613                                 struct bo_vm_reservation_context *ctx)
614 {
615         struct amdgpu_bo *bo = mem->bo;
616         struct kfd_bo_va_list *entry;
617         unsigned int i;
618         int ret;
619
620         ctx->reserved = false;
621         ctx->n_vms = 0;
622         ctx->vm_pd = NULL;
623         ctx->sync = &mem->sync;
624
625         INIT_LIST_HEAD(&ctx->list);
626         INIT_LIST_HEAD(&ctx->duplicates);
627
628         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
629                 if ((vm && vm != entry->bo_va->base.vm) ||
630                         (entry->is_mapped != map_type
631                         && map_type != BO_VM_ALL))
632                         continue;
633
634                 ctx->n_vms++;
635         }
636
637         if (ctx->n_vms != 0) {
638                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
639                                      GFP_KERNEL);
640                 if (!ctx->vm_pd)
641                         return -ENOMEM;
642         }
643
644         ctx->kfd_bo.priority = 0;
645         ctx->kfd_bo.tv.bo = &bo->tbo;
646         ctx->kfd_bo.tv.num_shared = 1;
647         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
648
649         i = 0;
650         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
651                 if ((vm && vm != entry->bo_va->base.vm) ||
652                         (entry->is_mapped != map_type
653                         && map_type != BO_VM_ALL))
654                         continue;
655
656                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
657                                 &ctx->vm_pd[i]);
658                 i++;
659         }
660
661         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
662                                      false, &ctx->duplicates, true);
663         if (!ret)
664                 ctx->reserved = true;
665         else
666                 pr_err("Failed to reserve buffers in ttm.\n");
667
668         if (ret) {
669                 kfree(ctx->vm_pd);
670                 ctx->vm_pd = NULL;
671         }
672
673         return ret;
674 }
675
676 /**
677  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
678  * @ctx: Reservation context to unreserve
679  * @wait: Optionally wait for a sync object representing pending VM updates
680  * @intr: Whether the wait is interruptible
681  *
682  * Also frees any resources allocated in
683  * reserve_bo_and_(cond_)vm(s). Returns the status from
684  * amdgpu_sync_wait.
685  */
686 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
687                                  bool wait, bool intr)
688 {
689         int ret = 0;
690
691         if (wait)
692                 ret = amdgpu_sync_wait(ctx->sync, intr);
693
694         if (ctx->reserved)
695                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
696         kfree(ctx->vm_pd);
697
698         ctx->sync = NULL;
699
700         ctx->reserved = false;
701         ctx->vm_pd = NULL;
702
703         return ret;
704 }
705
706 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
707                                 struct kfd_bo_va_list *entry,
708                                 struct amdgpu_sync *sync)
709 {
710         struct amdgpu_bo_va *bo_va = entry->bo_va;
711         struct amdgpu_vm *vm = bo_va->base.vm;
712
713         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
714
715         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
716
717         amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
718
719         return 0;
720 }
721
722 static int update_gpuvm_pte(struct amdgpu_device *adev,
723                 struct kfd_bo_va_list *entry,
724                 struct amdgpu_sync *sync)
725 {
726         int ret;
727         struct amdgpu_bo_va *bo_va = entry->bo_va;
728
729         /* Update the page tables  */
730         ret = amdgpu_vm_bo_update(adev, bo_va, false);
731         if (ret) {
732                 pr_err("amdgpu_vm_bo_update failed\n");
733                 return ret;
734         }
735
736         return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
737 }
738
739 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
740                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
741                 bool no_update_pte)
742 {
743         int ret;
744
745         /* Set virtual address for the allocation */
746         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
747                                amdgpu_bo_size(entry->bo_va->base.bo),
748                                entry->pte_flags);
749         if (ret) {
750                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
751                                 entry->va, ret);
752                 return ret;
753         }
754
755         if (no_update_pte)
756                 return 0;
757
758         ret = update_gpuvm_pte(adev, entry, sync);
759         if (ret) {
760                 pr_err("update_gpuvm_pte() failed\n");
761                 goto update_gpuvm_pte_failed;
762         }
763
764         return 0;
765
766 update_gpuvm_pte_failed:
767         unmap_bo_from_gpuvm(adev, entry, sync);
768         return ret;
769 }
770
771 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
772 {
773         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
774
775         if (!sg)
776                 return NULL;
777         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
778                 kfree(sg);
779                 return NULL;
780         }
781         sg->sgl->dma_address = addr;
782         sg->sgl->length = size;
783 #ifdef CONFIG_NEED_SG_DMA_LENGTH
784         sg->sgl->dma_length = size;
785 #endif
786         return sg;
787 }
788
789 static int process_validate_vms(struct amdkfd_process_info *process_info)
790 {
791         struct amdgpu_vm *peer_vm;
792         int ret;
793
794         list_for_each_entry(peer_vm, &process_info->vm_list_head,
795                             vm_list_node) {
796                 ret = vm_validate_pt_pd_bos(peer_vm);
797                 if (ret)
798                         return ret;
799         }
800
801         return 0;
802 }
803
804 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
805                                  struct amdgpu_sync *sync)
806 {
807         struct amdgpu_vm *peer_vm;
808         int ret;
809
810         list_for_each_entry(peer_vm, &process_info->vm_list_head,
811                             vm_list_node) {
812                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
813
814                 ret = amdgpu_sync_resv(NULL,
815                                         sync, pd->tbo.base.resv,
816                                         AMDGPU_FENCE_OWNER_KFD, false);
817                 if (ret)
818                         return ret;
819         }
820
821         return 0;
822 }
823
824 static int process_update_pds(struct amdkfd_process_info *process_info,
825                               struct amdgpu_sync *sync)
826 {
827         struct amdgpu_vm *peer_vm;
828         int ret;
829
830         list_for_each_entry(peer_vm, &process_info->vm_list_head,
831                             vm_list_node) {
832                 ret = vm_update_pds(peer_vm, sync);
833                 if (ret)
834                         return ret;
835         }
836
837         return 0;
838 }
839
840 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
841                        struct dma_fence **ef)
842 {
843         struct amdkfd_process_info *info = NULL;
844         int ret;
845
846         if (!*process_info) {
847                 info = kzalloc(sizeof(*info), GFP_KERNEL);
848                 if (!info)
849                         return -ENOMEM;
850
851                 mutex_init(&info->lock);
852                 INIT_LIST_HEAD(&info->vm_list_head);
853                 INIT_LIST_HEAD(&info->kfd_bo_list);
854                 INIT_LIST_HEAD(&info->userptr_valid_list);
855                 INIT_LIST_HEAD(&info->userptr_inval_list);
856
857                 info->eviction_fence =
858                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
859                                                    current->mm);
860                 if (!info->eviction_fence) {
861                         pr_err("Failed to create eviction fence\n");
862                         ret = -ENOMEM;
863                         goto create_evict_fence_fail;
864                 }
865
866                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
867                 atomic_set(&info->evicted_bos, 0);
868                 INIT_DELAYED_WORK(&info->restore_userptr_work,
869                                   amdgpu_amdkfd_restore_userptr_worker);
870
871                 *process_info = info;
872                 *ef = dma_fence_get(&info->eviction_fence->base);
873         }
874
875         vm->process_info = *process_info;
876
877         /* Validate page directory and attach eviction fence */
878         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
879         if (ret)
880                 goto reserve_pd_fail;
881         ret = vm_validate_pt_pd_bos(vm);
882         if (ret) {
883                 pr_err("validate_pt_pd_bos() failed\n");
884                 goto validate_pd_fail;
885         }
886         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
887                                   AMDGPU_FENCE_OWNER_KFD, false);
888         if (ret)
889                 goto wait_pd_fail;
890         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
891         if (ret)
892                 goto reserve_shared_fail;
893         amdgpu_bo_fence(vm->root.base.bo,
894                         &vm->process_info->eviction_fence->base, true);
895         amdgpu_bo_unreserve(vm->root.base.bo);
896
897         /* Update process info */
898         mutex_lock(&vm->process_info->lock);
899         list_add_tail(&vm->vm_list_node,
900                         &(vm->process_info->vm_list_head));
901         vm->process_info->n_vms++;
902         mutex_unlock(&vm->process_info->lock);
903
904         return 0;
905
906 reserve_shared_fail:
907 wait_pd_fail:
908 validate_pd_fail:
909         amdgpu_bo_unreserve(vm->root.base.bo);
910 reserve_pd_fail:
911         vm->process_info = NULL;
912         if (info) {
913                 /* Two fence references: one in info and one in *ef */
914                 dma_fence_put(&info->eviction_fence->base);
915                 dma_fence_put(*ef);
916                 *ef = NULL;
917                 *process_info = NULL;
918                 put_pid(info->pid);
919 create_evict_fence_fail:
920                 mutex_destroy(&info->lock);
921                 kfree(info);
922         }
923         return ret;
924 }
925
926 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
927                                           void **vm, void **process_info,
928                                           struct dma_fence **ef)
929 {
930         struct amdgpu_device *adev = get_amdgpu_device(kgd);
931         struct amdgpu_vm *new_vm;
932         int ret;
933
934         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
935         if (!new_vm)
936                 return -ENOMEM;
937
938         /* Initialize AMDGPU part of the VM */
939         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
940         if (ret) {
941                 pr_err("Failed to init vm, ret %d\n", ret);
942                 goto amdgpu_vm_init_fail;
943         }
944
945         /* Initialize KFD part of the VM and process info */
946         ret = init_kfd_vm(new_vm, process_info, ef);
947         if (ret)
948                 goto init_kfd_vm_fail;
949
950         *vm = (void *) new_vm;
951
952         return 0;
953
954 init_kfd_vm_fail:
955         amdgpu_vm_fini(adev, new_vm);
956 amdgpu_vm_init_fail:
957         kfree(new_vm);
958         return ret;
959 }
960
961 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
962                                            struct file *filp, unsigned int pasid,
963                                            void **vm, void **process_info,
964                                            struct dma_fence **ef)
965 {
966         struct amdgpu_device *adev = get_amdgpu_device(kgd);
967         struct drm_file *drm_priv = filp->private_data;
968         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
969         struct amdgpu_vm *avm = &drv_priv->vm;
970         int ret;
971
972         /* Already a compute VM? */
973         if (avm->process_info)
974                 return -EINVAL;
975
976         /* Convert VM into a compute VM */
977         ret = amdgpu_vm_make_compute(adev, avm, pasid);
978         if (ret)
979                 return ret;
980
981         /* Initialize KFD part of the VM and process info */
982         ret = init_kfd_vm(avm, process_info, ef);
983         if (ret)
984                 return ret;
985
986         *vm = (void *)avm;
987
988         return 0;
989 }
990
991 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
992                                     struct amdgpu_vm *vm)
993 {
994         struct amdkfd_process_info *process_info = vm->process_info;
995         struct amdgpu_bo *pd = vm->root.base.bo;
996
997         if (!process_info)
998                 return;
999
1000         /* Release eviction fence from PD */
1001         amdgpu_bo_reserve(pd, false);
1002         amdgpu_bo_fence(pd, NULL, false);
1003         amdgpu_bo_unreserve(pd);
1004
1005         /* Update process info */
1006         mutex_lock(&process_info->lock);
1007         process_info->n_vms--;
1008         list_del(&vm->vm_list_node);
1009         mutex_unlock(&process_info->lock);
1010
1011         /* Release per-process resources when last compute VM is destroyed */
1012         if (!process_info->n_vms) {
1013                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1014                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1015                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1016
1017                 dma_fence_put(&process_info->eviction_fence->base);
1018                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1019                 put_pid(process_info->pid);
1020                 mutex_destroy(&process_info->lock);
1021                 kfree(process_info);
1022         }
1023 }
1024
1025 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1026 {
1027         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1028         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1029
1030         if (WARN_ON(!kgd || !vm))
1031                 return;
1032
1033         pr_debug("Destroying process vm %p\n", vm);
1034
1035         /* Release the VM context */
1036         amdgpu_vm_fini(adev, avm);
1037         kfree(vm);
1038 }
1039
1040 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1041 {
1042         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1043         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1044
1045         if (WARN_ON(!kgd || !vm))
1046                 return;
1047
1048         pr_debug("Releasing process vm %p\n", vm);
1049
1050         /* The original pasid of the amdgpu vm has already been
1051          * released when the vm was converted to a compute vm.
1052          * The current pasid is managed by kfd and will be
1053          * released on kfd process destroy. Set the amdgpu pasid
1054          * to 0 to avoid a duplicate release.
1055          */
1056         amdgpu_vm_release_compute(adev, avm);
1057 }
1058
1059 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1060 {
1061         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1062         struct amdgpu_bo *pd = avm->root.base.bo;
1063         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1064
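        /* Pre-Vega10 ASICs program the page directory base address as a
         * page frame number; newer ASICs take the full physical address.
         */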
1065         if (adev->asic_type < CHIP_VEGA10)
1066                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1067         return avm->pd_phys_addr;
1068 }
1069
1070 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1071                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1072                 void *vm, struct kgd_mem **mem,
1073                 uint64_t *offset, uint32_t flags)
1074 {
1075         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1076         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1077         enum ttm_bo_type bo_type = ttm_bo_type_device;
1078         struct sg_table *sg = NULL;
1079         uint64_t user_addr = 0;
1080         struct amdgpu_bo *bo;
1081         struct amdgpu_bo_param bp;
1082         int byte_align;
1083         u32 domain, alloc_domain;
1084         u64 alloc_flags;
1085         uint32_t mapping_flags;
1086         int ret;
1087
1088         /*
1089          * Check on which domain to allocate BO
1090          */
1091         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1092                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1093                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1094                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1095                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1096                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1097         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1098                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1099                 alloc_flags = 0;
1100         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1101                 domain = AMDGPU_GEM_DOMAIN_GTT;
1102                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1103                 alloc_flags = 0;
1104                 if (!offset || !*offset)
1105                         return -EINVAL;
1106                 user_addr = *offset;
1107         } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1108                         ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1109                 domain = AMDGPU_GEM_DOMAIN_GTT;
1110                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1111                 bo_type = ttm_bo_type_sg;
1112                 alloc_flags = 0;
1113                 if (size > UINT_MAX)
1114                         return -EINVAL;
1115                 sg = create_doorbell_sg(*offset, size);
1116                 if (!sg)
1117                         return -ENOMEM;
1118         } else {
1119                 return -EINVAL;
1120         }
1121
1122         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1123         if (!*mem) {
1124                 ret = -ENOMEM;
1125                 goto err;
1126         }
1127         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1128         mutex_init(&(*mem)->lock);
1129         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1130
1131         /* Workaround for AQL queue wraparound bug. Map the same
1132          * memory twice. That means we only actually allocate half
1133          * the memory.
1134          */
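        /* The second (AQL) mapping at va + size is created later, when the
         * BO is mapped to a GPU, by add_bo_to_vm() with is_aql = true.
         */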
1135         if ((*mem)->aql_queue)
1136                 size = size >> 1;
1137
1138         /* Workaround for TLB bug on older VI chips */
1139         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1140                         adev->asic_type != CHIP_FIJI &&
1141                         adev->asic_type != CHIP_POLARIS10 &&
1142                         adev->asic_type != CHIP_POLARIS11 &&
1143                         adev->asic_type != CHIP_POLARIS12) ?
1144                         VI_BO_SIZE_ALIGN : 1;
1145
1146         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1147         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1148                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1149         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1150                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1151         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1152                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1153         else
1154                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1155         (*mem)->mapping_flags = mapping_flags;
1156
1157         amdgpu_sync_create(&(*mem)->sync);
1158
1159         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1160         if (ret) {
1161                 pr_debug("Insufficient system memory\n");
1162                 goto err_reserve_limit;
1163         }
1164
1165         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1166                         va, size, domain_string(alloc_domain));
1167
1168         memset(&bp, 0, sizeof(bp));
1169         bp.size = size;
1170         bp.byte_align = byte_align;
1171         bp.domain = alloc_domain;
1172         bp.flags = alloc_flags;
1173         bp.type = bo_type;
1174         bp.resv = NULL;
1175         ret = amdgpu_bo_create(adev, &bp, &bo);
1176         if (ret) {
1177                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1178                                 domain_string(alloc_domain), ret);
1179                 goto err_bo_create;
1180         }
1181         if (bo_type == ttm_bo_type_sg) {
1182                 bo->tbo.sg = sg;
1183                 bo->tbo.ttm->sg = sg;
1184         }
1185         bo->kfd_bo = *mem;
1186         (*mem)->bo = bo;
1187         if (user_addr)
1188                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1189
1190         (*mem)->va = va;
1191         (*mem)->domain = domain;
1192         (*mem)->mapped_to_gpu_memory = 0;
1193         (*mem)->process_info = avm->process_info;
1194         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1195
1196         if (user_addr) {
1197                 ret = init_user_pages(*mem, current->mm, user_addr);
1198                 if (ret)
1199                         goto allocate_init_user_pages_failed;
1200         }
1201
1202         if (offset)
1203                 *offset = amdgpu_bo_mmap_offset(bo);
1204
1205         return 0;
1206
1207 allocate_init_user_pages_failed:
1208         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1209         amdgpu_bo_unref(&bo);
1210         /* Don't unreserve system mem limit twice */
1211         goto err_reserve_limit;
1212 err_bo_create:
1213         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1214 err_reserve_limit:
1215         mutex_destroy(&(*mem)->lock);
1216         kfree(*mem);
1217 err:
1218         if (sg) {
1219                 sg_free_table(sg);
1220                 kfree(sg);
1221         }
1222         return ret;
1223 }
1224
1225 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1226                 struct kgd_dev *kgd, struct kgd_mem *mem)
1227 {
1228         struct amdkfd_process_info *process_info = mem->process_info;
1229         unsigned long bo_size = mem->bo->tbo.mem.size;
1230         struct kfd_bo_va_list *entry, *tmp;
1231         struct bo_vm_reservation_context ctx;
1232         struct ttm_validate_buffer *bo_list_entry;
1233         int ret;
1234
1235         mutex_lock(&mem->lock);
1236
1237         if (mem->mapped_to_gpu_memory > 0) {
1238                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1239                                 mem->va, bo_size);
1240                 mutex_unlock(&mem->lock);
1241                 return -EBUSY;
1242         }
1243
1244         mutex_unlock(&mem->lock);
1245         /* lock is not needed after this, since mem is unused and will
1246          * be freed anyway
1247          */
1248
1249         /* No more MMU notifiers */
1250         amdgpu_mn_unregister(mem->bo);
1251
1252         /* Make sure restore workers don't access the BO any more */
1253         bo_list_entry = &mem->validate_list;
1254         mutex_lock(&process_info->lock);
1255         list_del(&bo_list_entry->head);
1256         mutex_unlock(&process_info->lock);
1257
1258         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1259         if (unlikely(ret))
1260                 return ret;
1261
1262         /* The eviction fence should be removed by the last unmap.
1263          * TODO: Log an error condition if the bo still has the eviction fence
1264          * attached
1265          */
1266         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1267                                         process_info->eviction_fence);
1268         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1269                 mem->va + bo_size * (1 + mem->aql_queue));
1270
1271         /* Remove from VM internal data structures */
1272         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1273                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1274                                 entry, bo_size);
1275
1276         ret = unreserve_bo_and_vms(&ctx, false, false);
1277
1278         /* Free the sync object */
1279         amdgpu_sync_free(&mem->sync);
1280
1281         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1282          * remap BO. We need to free it.
1283          */
1284         if (mem->bo->tbo.sg) {
1285                 sg_free_table(mem->bo->tbo.sg);
1286                 kfree(mem->bo->tbo.sg);
1287         }
1288
1289         /* Free the BO */
1290         amdgpu_bo_unref(&mem->bo);
1291         mutex_destroy(&mem->lock);
1292         kfree(mem);
1293
1294         return ret;
1295 }
1296
1297 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1298                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1299 {
1300         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1301         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1302         int ret;
1303         struct amdgpu_bo *bo;
1304         uint32_t domain;
1305         struct kfd_bo_va_list *entry;
1306         struct bo_vm_reservation_context ctx;
1307         struct kfd_bo_va_list *bo_va_entry = NULL;
1308         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1309         unsigned long bo_size;
1310         bool is_invalid_userptr = false;
1311
1312         bo = mem->bo;
1313         if (!bo) {
1314                 pr_err("Invalid BO when mapping memory to GPU\n");
1315                 return -EINVAL;
1316         }
1317
1318         /* Make sure restore is not running concurrently. Since we
1319          * don't map invalid userptr BOs, we rely on the next restore
1320          * worker to do the mapping
1321          */
1322         mutex_lock(&mem->process_info->lock);
1323
1324         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1325          * sure that the MMU notifier is no longer running
1326          * concurrently and the queues are actually stopped
1327          */
1328         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1329                 down_write(&current->mm->mmap_sem);
1330                 is_invalid_userptr = atomic_read(&mem->invalid);
1331                 up_write(&current->mm->mmap_sem);
1332         }
1333
1334         mutex_lock(&mem->lock);
1335
1336         domain = mem->domain;
1337         bo_size = bo->tbo.mem.size;
1338
1339         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1340                         mem->va,
1341                         mem->va + bo_size * (1 + mem->aql_queue),
1342                         vm, domain_string(domain));
1343
1344         ret = reserve_bo_and_vm(mem, vm, &ctx);
1345         if (unlikely(ret))
1346                 goto out;
1347
1348         /* Userptr can be marked as "not invalid", but not actually be
1349          * validated yet (still in the system domain). In that case
1350          * the queues are still stopped and we can leave mapping for
1351          * the next restore worker
1352          */
1353         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1354             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1355                 is_invalid_userptr = true;
1356
1357         if (check_if_add_bo_to_vm(avm, mem)) {
1358                 ret = add_bo_to_vm(adev, mem, avm, false,
1359                                 &bo_va_entry);
1360                 if (ret)
1361                         goto add_bo_to_vm_failed;
1362                 if (mem->aql_queue) {
1363                         ret = add_bo_to_vm(adev, mem, avm,
1364                                         true, &bo_va_entry_aql);
1365                         if (ret)
1366                                 goto add_bo_to_vm_failed_aql;
1367                 }
1368         } else {
1369                 ret = vm_validate_pt_pd_bos(avm);
1370                 if (unlikely(ret))
1371                         goto add_bo_to_vm_failed;
1372         }
1373
1374         if (mem->mapped_to_gpu_memory == 0 &&
1375             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1376                 /* Validate BO only once. The eviction fence gets added to BO
1377                  * the first time it is mapped. Validate will wait for all
1378                  * background evictions to complete.
1379                  */
1380                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1381                 if (ret) {
1382                         pr_debug("Validate failed\n");
1383                         goto map_bo_to_gpuvm_failed;
1384                 }
1385         }
1386
1387         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1388                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1389                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1390                                         entry->va, entry->va + bo_size,
1391                                         entry);
1392
1393                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1394                                               is_invalid_userptr);
1395                         if (ret) {
1396                                 pr_err("Failed to map bo to gpuvm\n");
1397                                 goto map_bo_to_gpuvm_failed;
1398                         }
1399
1400                         ret = vm_update_pds(vm, ctx.sync);
1401                         if (ret) {
1402                                 pr_err("Failed to update page directories\n");
1403                                 goto map_bo_to_gpuvm_failed;
1404                         }
1405
1406                         entry->is_mapped = true;
1407                         mem->mapped_to_gpu_memory++;
1408                         pr_debug("\t INC mapping count %d\n",
1409                                         mem->mapped_to_gpu_memory);
1410                 }
1411         }
1412
1413         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1414                 amdgpu_bo_fence(bo,
1415                                 &avm->process_info->eviction_fence->base,
1416                                 true);
1417         ret = unreserve_bo_and_vms(&ctx, false, false);
1418
1419         goto out;
1420
1421 map_bo_to_gpuvm_failed:
1422         if (bo_va_entry_aql)
1423                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1424 add_bo_to_vm_failed_aql:
1425         if (bo_va_entry)
1426                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1427 add_bo_to_vm_failed:
1428         unreserve_bo_and_vms(&ctx, false, false);
1429 out:
1430         mutex_unlock(&mem->process_info->lock);
1431         mutex_unlock(&mem->lock);
1432         return ret;
1433 }
1434
1435 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1436                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1437 {
1438         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1439         struct amdkfd_process_info *process_info =
1440                 ((struct amdgpu_vm *)vm)->process_info;
1441         unsigned long bo_size = mem->bo->tbo.mem.size;
1442         struct kfd_bo_va_list *entry;
1443         struct bo_vm_reservation_context ctx;
1444         int ret;
1445
1446         mutex_lock(&mem->lock);
1447
1448         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1449         if (unlikely(ret))
1450                 goto out;
1451         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1452         if (ctx.n_vms == 0) {
1453                 ret = -EINVAL;
1454                 goto unreserve_out;
1455         }
1456
1457         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1458         if (unlikely(ret))
1459                 goto unreserve_out;
1460
1461         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1462                 mem->va,
1463                 mem->va + bo_size * (1 + mem->aql_queue),
1464                 vm);
1465
1466         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1467                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1468                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1469                                         entry->va,
1470                                         entry->va + bo_size,
1471                                         entry);
1472
1473                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1474                         if (ret == 0) {
1475                                 entry->is_mapped = false;
1476                         } else {
1477                                 pr_err("failed to unmap VA 0x%llx\n",
1478                                                 mem->va);
1479                                 goto unreserve_out;
1480                         }
1481
1482                         mem->mapped_to_gpu_memory--;
1483                         pr_debug("\t DEC mapping count %d\n",
1484                                         mem->mapped_to_gpu_memory);
1485                 }
1486         }
1487
1488         /* If the BO is unmapped from all VMs, unfence it so that it
1489          * can be evicted if required.
1490          */
1491         if (mem->mapped_to_gpu_memory == 0 &&
1492             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1493                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1494                                                 process_info->eviction_fence);
1495
1496 unreserve_out:
1497         unreserve_bo_and_vms(&ctx, false, false);
1498 out:
1499         mutex_unlock(&mem->lock);
1500         return ret;
1501 }
1502
1503 int amdgpu_amdkfd_gpuvm_sync_memory(
1504                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1505 {
1506         struct amdgpu_sync sync;
1507         int ret;
1508
1509         amdgpu_sync_create(&sync);
1510
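        /* Wait on a private clone of the BO's sync object so that
         * mem->lock is not held while waiting for the fences to signal.
         */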
1511         mutex_lock(&mem->lock);
1512         amdgpu_sync_clone(&mem->sync, &sync);
1513         mutex_unlock(&mem->lock);
1514
1515         ret = amdgpu_sync_wait(&sync, intr);
1516         amdgpu_sync_free(&sync);
1517         return ret;
1518 }
1519
1520 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1521                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1522 {
1523         int ret;
1524         struct amdgpu_bo *bo = mem->bo;
1525
1526         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1527                 pr_err("userptr can't be mapped to kernel\n");
1528                 return -EINVAL;
1529         }
1530
1531         /* Remove kgd_mem from the kfd_bo_list so that the restore
1532          * worker does not re-validate this BO after an eviction.
1533          */
1534         mutex_lock(&mem->process_info->lock);
1535
1536         ret = amdgpu_bo_reserve(bo, true);
1537         if (ret) {
1538                 pr_err("Failed to reserve bo. ret %d\n", ret);
1539                 goto bo_reserve_failed;
1540         }
1541
1542         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1543         if (ret) {
1544                 pr_err("Failed to pin bo. ret %d\n", ret);
1545                 goto pin_failed;
1546         }
1547
1548         ret = amdgpu_bo_kmap(bo, kptr);
1549         if (ret) {
1550                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1551                 goto kmap_failed;
1552         }
1553
1554         amdgpu_amdkfd_remove_eviction_fence(
1555                 bo, mem->process_info->eviction_fence);
1556         list_del_init(&mem->validate_list.head);
1557
1558         if (size)
1559                 *size = amdgpu_bo_size(bo);
1560
1561         amdgpu_bo_unreserve(bo);
1562
1563         mutex_unlock(&mem->process_info->lock);
1564         return 0;
1565
1566 kmap_failed:
1567         amdgpu_bo_unpin(bo);
1568 pin_failed:
1569         amdgpu_bo_unreserve(bo);
1570 bo_reserve_failed:
1571         mutex_unlock(&mem->process_info->lock);
1572
1573         return ret;
1574 }
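
/*
 * Illustrative sketch (not part of this file): one way a caller could use
 * the helper above to get a CPU pointer to a GTT BO. "kgd" and "mem" are
 * assumed to come from earlier allocation calls; only the essential error
 * handling is shown.
 *
 *      void *kptr;
 *      uint64_t size;
 *      int r;
 *
 *      r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
 *      if (r)
 *              return r;
 *      memset(kptr, 0, size);
 *
 * The mapping stays usable because the helper pinned the BO in GTT and
 * removed it from the restore worker's kfd_bo_list.
 */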
1575
1576 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1577                                               struct kfd_vm_fault_info *mem)
1578 {
1579         struct amdgpu_device *adev;
1580
1581         adev = (struct amdgpu_device *)kgd;
1582         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1583                 *mem = *adev->gmc.vm_fault_info;
1584                 mb();
1585                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1586         }
1587         return 0;
1588 }
1589
1590 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1591                                       struct dma_buf *dma_buf,
1592                                       uint64_t va, void *vm,
1593                                       struct kgd_mem **mem, uint64_t *size,
1594                                       uint64_t *mmap_offset)
1595 {
1596         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1597         struct drm_gem_object *obj;
1598         struct amdgpu_bo *bo;
1599         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1600
1601         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1602                 /* Can't handle non-graphics buffers */
1603                 return -EINVAL;
1604
1605         obj = dma_buf->priv;
1606         if (obj->dev->dev_private != adev)
1607                 /* Can't handle buffers from other devices */
1608                 return -EINVAL;
1609
1610         bo = gem_to_amdgpu_bo(obj);
1611         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1612                                     AMDGPU_GEM_DOMAIN_GTT)))
1613                 /* Only VRAM and GTT BOs are supported */
1614                 return -EINVAL;
1615
1616         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1617         if (!*mem)
1618                 return -ENOMEM;
1619
1620         if (size)
1621                 *size = amdgpu_bo_size(bo);
1622
1623         if (mmap_offset)
1624                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1625
1626         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1627         mutex_init(&(*mem)->lock);
1628         (*mem)->mapping_flags =
1629                 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1630                 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1631
1632         (*mem)->bo = amdgpu_bo_ref(bo);
1633         (*mem)->va = va;
1634         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1635                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1636         (*mem)->mapped_to_gpu_memory = 0;
1637         (*mem)->process_info = avm->process_info;
1638         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1639         amdgpu_sync_create(&(*mem)->sync);
1640
1641         return 0;
1642 }
1643
1644 /* Evict a userptr BO by stopping the queues if necessary
1645  *
1646  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1647  * cannot do any memory allocations, and cannot take any locks that
1648  * are held elsewhere while allocating memory. Therefore this is as
1649  * simple as possible, using atomic counters.
1650  *
1651  * It doesn't do anything to the BO itself. The real work happens in
1652  * restore, where we get updated page addresses. This function only
1653  * ensures that GPU access to the BO is stopped.
1654  */
1655 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1656                                 struct mm_struct *mm)
1657 {
1658         struct amdkfd_process_info *process_info = mem->process_info;
1659         int invalid, evicted_bos;
1660         int r = 0;
1661
1662         invalid = atomic_inc_return(&mem->invalid);
1663         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1664         if (evicted_bos == 1) {
1665                 /* First eviction, stop the queues */
1666                 r = kgd2kfd_quiesce_mm(mm);
1667                 if (r)
1668                         pr_err("Failed to quiesce KFD\n");
1669                 schedule_delayed_work(&process_info->restore_userptr_work,
1670                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1671         }
1672
1673         return r;
1674 }
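
/*
 * Illustrative sketch (not part of this file): a stand-alone C11 model of
 * the eviction counting above. stop_queues() is a hypothetical stand-in
 * for kgd2kfd_quiesce_mm() plus scheduling the restore work; the point is
 * that only the increment that takes the per-process counter from 0 to 1
 * stops the queues, while later evictions merely bump the count.
 *
 *      #include <stdatomic.h>
 *
 *      static atomic_int evicted_bos;
 *
 *      static void on_evict(void)
 *      {
 *              if (atomic_fetch_add(&evicted_bos, 1) == 0)
 *                      stop_queues();
 *      }
 */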
1675
1676 /* Update invalid userptr BOs
1677  *
1678  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1679  * userptr_inval_list and updates user pages for all BOs that have
1680  * been invalidated since their last update.
1681  */
1682 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1683                                      struct mm_struct *mm)
1684 {
1685         struct kgd_mem *mem, *tmp_mem;
1686         struct amdgpu_bo *bo;
1687         struct ttm_operation_ctx ctx = { false, false };
1688         int invalid, ret;
1689
1690         /* Move all invalidated BOs to the userptr_inval_list and
1691          * release their user pages by migration to the CPU domain
1692          */
1693         list_for_each_entry_safe(mem, tmp_mem,
1694                                  &process_info->userptr_valid_list,
1695                                  validate_list.head) {
1696                 if (!atomic_read(&mem->invalid))
1697                         continue; /* BO is still valid */
1698
1699                 bo = mem->bo;
1700
1701                 if (amdgpu_bo_reserve(bo, true))
1702                         return -EAGAIN;
1703                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1704                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1705                 amdgpu_bo_unreserve(bo);
1706                 if (ret) {
1707                         pr_err("%s: Failed to invalidate userptr BO\n",
1708                                __func__);
1709                         return -EAGAIN;
1710                 }
1711
1712                 list_move_tail(&mem->validate_list.head,
1713                                &process_info->userptr_inval_list);
1714         }
1715
1716         if (list_empty(&process_info->userptr_inval_list))
1717                 return 0; /* All evicted userptr BOs were freed */
1718
1719         /* Go through userptr_inval_list and update any invalid user_pages */
1720         list_for_each_entry(mem, &process_info->userptr_inval_list,
1721                             validate_list.head) {
1722                 invalid = atomic_read(&mem->invalid);
1723                 if (!invalid)
1724                         /* BO hasn't been invalidated since the last
1725                          * revalidation attempt. Keep its BO list.
1726                          */
1727                         continue;
1728
1729                 bo = mem->bo;
1730
1731                 /* Get updated user pages */
1732                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1733                 if (ret) {
1734                         pr_debug("%s: Failed to get user pages: %d\n",
1735                                 __func__, ret);
1736
1737                         /* Return error -EBUSY or -ENOMEM, retry restore */
1738                         return ret;
1739                 }
1740
1741                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1742
1743                 /* Mark the BO as valid unless it was invalidated
1744                  * again concurrently.
1745                  */
1746                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1747                         return -EAGAIN;
1748         }
1749
1750         return 0;
1751 }
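
/*
 * Illustrative sketch (not part of this file): a C11 model of the
 * "mark valid only if not invalidated again" check used above.
 * refresh_user_pages() is a hypothetical stand-in for
 * amdgpu_ttm_tt_get_user_pages(); the compare-and-swap fails, and the
 * caller must retry, if a new invalidation raced with the page refresh.
 *
 *      #include <stdatomic.h>
 *      #include <stdbool.h>
 *
 *      static atomic_int invalid;
 *
 *      static bool try_revalidate(void)
 *      {
 *              int seen = atomic_load(&invalid);
 *
 *              if (!seen)
 *                      return true;
 *              refresh_user_pages();
 *              return atomic_compare_exchange_strong(&invalid, &seen, 0);
 *      }
 */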
1752
1753 /* Validate invalid userptr BOs
1754  *
1755  * Validates BOs on the userptr_inval_list, and moves them back to the
1756  * userptr_valid_list. Also updates GPUVM page tables with new page
1757  * addresses and waits for the page table updates to complete.
1758  */
1759 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1760 {
1761         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1762         struct list_head resv_list, duplicates;
1763         struct ww_acquire_ctx ticket;
1764         struct amdgpu_sync sync;
1765
1766         struct amdgpu_vm *peer_vm;
1767         struct kgd_mem *mem, *tmp_mem;
1768         struct amdgpu_bo *bo;
1769         struct ttm_operation_ctx ctx = { false, false };
1770         int i, ret;
1771
1772         pd_bo_list_entries = kcalloc(process_info->n_vms,
1773                                      sizeof(struct amdgpu_bo_list_entry),
1774                                      GFP_KERNEL);
1775         if (!pd_bo_list_entries) {
1776                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1777                 ret = -ENOMEM;
1778                 goto out_no_mem;
1779         }
1780
1781         INIT_LIST_HEAD(&resv_list);
1782         INIT_LIST_HEAD(&duplicates);
1783
1784         /* Get all the page directory BOs that need to be reserved */
1785         i = 0;
1786         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1787                             vm_list_node)
1788                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1789                                     &pd_bo_list_entries[i++]);
1790         /* Add the userptr_inval_list entries to resv_list */
1791         list_for_each_entry(mem, &process_info->userptr_inval_list,
1792                             validate_list.head) {
1793                 list_add_tail(&mem->resv_list.head, &resv_list);
1794                 mem->resv_list.bo = mem->validate_list.bo;
1795                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1796         }
1797
1798         /* Reserve all BOs and page tables for validation */
1799         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
1800                                      true);
1801         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1802         if (ret)
1803                 goto out_free;
1804
1805         amdgpu_sync_create(&sync);
1806
1807         ret = process_validate_vms(process_info);
1808         if (ret)
1809                 goto unreserve_out;
1810
1811         /* Validate BOs and update GPUVM page tables */
1812         list_for_each_entry_safe(mem, tmp_mem,
1813                                  &process_info->userptr_inval_list,
1814                                  validate_list.head) {
1815                 struct kfd_bo_va_list *bo_va_entry;
1816
1817                 bo = mem->bo;
1818
1819                 /* Validate the BO if we got user pages */
1820                 if (bo->tbo.ttm->pages[0]) {
1821                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1822                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1823                         if (ret) {
1824                                 pr_err("%s: failed to validate BO\n", __func__);
1825                                 goto unreserve_out;
1826                         }
1827                 }
1828
1829                 list_move_tail(&mem->validate_list.head,
1830                                &process_info->userptr_valid_list);
1831
1832                 /* Update mapping. If the BO was not validated
1833                  * (because we couldn't get user pages), this will
1834                  * clear the page table entries, which will result in
1835                  * VM faults if the GPU tries to access the invalid
1836                  * memory.
1837                  */
1838                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1839                         if (!bo_va_entry->is_mapped)
1840                                 continue;
1841
1842                         ret = update_gpuvm_pte((struct amdgpu_device *)
1843                                                bo_va_entry->kgd_dev,
1844                                                bo_va_entry, &sync);
1845                         if (ret) {
1846                                 pr_err("%s: update PTE failed\n", __func__);
1847                                 /* make sure this gets validated again */
1848                                 atomic_inc(&mem->invalid);
1849                                 goto unreserve_out;
1850                         }
1851                 }
1852         }
1853
1854         /* Update page directories */
1855         ret = process_update_pds(process_info, &sync);
1856
1857 unreserve_out:
1858         ttm_eu_backoff_reservation(&ticket, &resv_list);
1859         amdgpu_sync_wait(&sync, false);
1860         amdgpu_sync_free(&sync);
1861 out_free:
1862         kfree(pd_bo_list_entries);
1863 out_no_mem:
1864
1865         return ret;
1866 }
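
/*
 * Illustrative sketch (not part of this file): the reserve/validate/backoff
 * bracket shared by this function and the restore path below, reduced to
 * its skeleton. Filling the reservation list and the actual validation and
 * page-table updates are elided.
 *
 *      struct ww_acquire_ctx ticket;
 *      struct list_head resv_list, duplicates;
 *      int r;
 *
 *      INIT_LIST_HEAD(&resv_list);
 *      INIT_LIST_HEAD(&duplicates);
 *      (add the PD BOs and the BOs to validate to resv_list)
 *
 *      r = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
 *                                 true);
 *      if (r)
 *              return r;
 *
 *      (validate BOs and update page tables while everything is reserved)
 *
 *      ttm_eu_backoff_reservation(&ticket, &resv_list);
 */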
1867
1868 /* Worker callback to restore evicted userptr BOs
1869  *
1870  * Tries to update and validate all userptr BOs. If successful and no
1871  * concurrent evictions happened, the queues are restarted. Otherwise,
1872  * reschedule for another attempt later.
1873  */
1874 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1875 {
1876         struct delayed_work *dwork = to_delayed_work(work);
1877         struct amdkfd_process_info *process_info =
1878                 container_of(dwork, struct amdkfd_process_info,
1879                              restore_userptr_work);
1880         struct task_struct *usertask;
1881         struct mm_struct *mm;
1882         int evicted_bos;
1883
1884         evicted_bos = atomic_read(&process_info->evicted_bos);
1885         if (!evicted_bos)
1886                 return;
1887
1888         /* Reference task and mm in case of concurrent process termination */
1889         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1890         if (!usertask)
1891                 return;
1892         mm = get_task_mm(usertask);
1893         if (!mm) {
1894                 put_task_struct(usertask);
1895                 return;
1896         }
1897
1898         mutex_lock(&process_info->lock);
1899
1900         if (update_invalid_user_pages(process_info, mm))
1901                 goto unlock_out;
1902         /* userptr_inval_list can be empty if all evicted userptr BOs
1903          * have been freed. In that case there is nothing to validate
1904          * and we can just restart the queues.
1905          */
1906         if (!list_empty(&process_info->userptr_inval_list)) {
1907                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1908                         goto unlock_out; /* Concurrent eviction, try again */
1909
1910                 if (validate_invalid_user_pages(process_info))
1911                         goto unlock_out;
1912         }
1913         /* Final check for a concurrent eviction, done with an atomic
1914          * update. If another eviction happens after this successful
1915          * update, it counts as a first eviction that calls quiesce_mm.
1916          * The eviction reference counting inside KFD handles that case.
1917          */
1918         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1919             evicted_bos)
1920                 goto unlock_out;
1921         evicted_bos = 0;
1922         if (kgd2kfd_resume_mm(mm)) {
1923                 pr_err("%s: Failed to resume KFD\n", __func__);
1924                 /* No recovery from this failure. Probably the CP is
1925                  * hanging. No point trying again.
1926                  */
1927         }
1928
1929 unlock_out:
1930         mutex_unlock(&process_info->lock);
1931         mmput(mm);
1932         put_task_struct(usertask);
1933
1934         /* If validation failed, reschedule another attempt */
1935         if (evicted_bos)
1936                 schedule_delayed_work(&process_info->restore_userptr_work,
1937                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1938 }
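
/*
 * Illustrative sketch (not part of this file), continuing the C11 model
 * given after amdgpu_amdkfd_evict_userptr(): the restore side resumes the
 * queues only if no eviction raced with the revalidation. resume_queues()
 * and reschedule_restore() are hypothetical stand-ins for
 * kgd2kfd_resume_mm() and schedule_delayed_work().
 *
 *      static void on_restore(int seen)
 *      {
 *              if (atomic_compare_exchange_strong(&evicted_bos, &seen, 0))
 *                      resume_queues();
 *              else
 *                      reschedule_restore();
 *      }
 *
 * "seen" is the counter value sampled before revalidation started; if it
 * changed in the meantime, another eviction occurred and the work is
 * rescheduled instead of resuming the queues.
 */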
1939
1940 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1941  *   KFD process identified by process_info
1942  *
1943  * @process_info: amdkfd_process_info of the KFD process
1944  *
1945  * After memory eviction, the restore thread calls this function. It should
1946  * be called while the process is still valid. BO restore involves:
1947  *
1948  * 1.  Release old eviction fence and create new one
1949  * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
1950  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1951  *     BOs that need to be reserved.
1952  * 4.  Reserve all the BOs.
1953  * 5.  Validate PD and PT BOs.
1954  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence.
1955  * 7.  Add fence to all PD and PT BOs.
1956  * 8.  Unreserve all BOs
1957  */
1958 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1959 {
1960         struct amdgpu_bo_list_entry *pd_bo_list;
1961         struct amdkfd_process_info *process_info = info;
1962         struct amdgpu_vm *peer_vm;
1963         struct kgd_mem *mem;
1964         struct bo_vm_reservation_context ctx;
1965         struct amdgpu_amdkfd_fence *new_fence;
1966         int ret = 0, i;
1967         struct list_head duplicate_save;
1968         struct amdgpu_sync sync_obj;
1969
1970         INIT_LIST_HEAD(&duplicate_save);
1971         INIT_LIST_HEAD(&ctx.list);
1972         INIT_LIST_HEAD(&ctx.duplicates);
1973
1974         pd_bo_list = kcalloc(process_info->n_vms,
1975                              sizeof(struct amdgpu_bo_list_entry),
1976                              GFP_KERNEL);
1977         if (!pd_bo_list)
1978                 return -ENOMEM;
1979
1980         i = 0;
1981         mutex_lock(&process_info->lock);
1982         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1983                         vm_list_node)
1984                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
1985
1986         /* Reserve all BOs and page tables/directory. Add all BOs from
1987          * kfd_bo_list to ctx.list
1988          */
1989         list_for_each_entry(mem, &process_info->kfd_bo_list,
1990                             validate_list.head) {
1991
1992                 list_add_tail(&mem->resv_list.head, &ctx.list);
1993                 mem->resv_list.bo = mem->validate_list.bo;
1994                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1995         }
1996
1997         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
1998                                      false, &duplicate_save, true);
1999         if (ret) {
2000                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2001                 goto ttm_reserve_fail;
2002         }
2003
2004         amdgpu_sync_create(&sync_obj);
2005
2006         /* Validate PDs and PTs */
2007         ret = process_validate_vms(process_info);
2008         if (ret)
2009                 goto validate_map_fail;
2010
2011         ret = process_sync_pds_resv(process_info, &sync_obj);
2012         if (ret) {
2013                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2014                 goto validate_map_fail;
2015         }
2016
2017         /* Validate BOs and map them to GPUVM (update VM page tables). */
2018         list_for_each_entry(mem, &process_info->kfd_bo_list,
2019                             validate_list.head) {
2020
2021                 struct amdgpu_bo *bo = mem->bo;
2022                 uint32_t domain = mem->domain;
2023                 struct kfd_bo_va_list *bo_va_entry;
2024
2025                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2026                 if (ret) {
2027                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2028                         goto validate_map_fail;
2029                 }
2030                 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2031                 if (ret) {
2032                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2033                         goto validate_map_fail;
2034                 }
2035                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2036                                     bo_list) {
2037                         ret = update_gpuvm_pte((struct amdgpu_device *)
2038                                               bo_va_entry->kgd_dev,
2039                                               bo_va_entry,
2040                                               &sync_obj);
2041                         if (ret) {
2042                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2043                                 goto validate_map_fail;
2044                         }
2045                 }
2046         }
2047
2048         /* Update page directories */
2049         ret = process_update_pds(process_info, &sync_obj);
2050         if (ret) {
2051                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2052                 goto validate_map_fail;
2053         }
2054
2055         /* Wait for validate and PT updates to finish */
2056         amdgpu_sync_wait(&sync_obj, false);
2057
2058         /* Release the old eviction fence and create a new one. A fence
2059          * only goes from unsignaled to signaled and cannot be reused, so
2060          * create the new fence with the context and mm of the old one.
2061          */
2062         new_fence = amdgpu_amdkfd_fence_create(
2063                                 process_info->eviction_fence->base.context,
2064                                 process_info->eviction_fence->mm);
2065         if (!new_fence) {
2066                 pr_err("Failed to create eviction fence\n");
2067                 ret = -ENOMEM;
2068                 goto validate_map_fail;
2069         }
2070         dma_fence_put(&process_info->eviction_fence->base);
2071         process_info->eviction_fence = new_fence;
2072         *ef = dma_fence_get(&new_fence->base);
2073
2074         /* Attach new eviction fence to all BOs */
2075         list_for_each_entry(mem, &process_info->kfd_bo_list,
2076                 validate_list.head)
2077                 amdgpu_bo_fence(mem->bo,
2078                         &process_info->eviction_fence->base, true);
2079
2080         /* Attach eviction fence to PD / PT BOs */
2081         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2082                             vm_list_node) {
2083                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2084
2085                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2086         }
2087
2088 validate_map_fail:
2089         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2090         amdgpu_sync_free(&sync_obj);
2091 ttm_reserve_fail:
2092         mutex_unlock(&process_info->lock);
2093         kfree(pd_bo_list);
2094         return ret;
2095 }
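
/*
 * Illustrative sketch (not part of this file): the fence-swap idiom from
 * step 1 above in isolation. "old_fence" is a hypothetical local alias for
 * process_info->eviction_fence before the swap; a fence only goes from
 * unsignaled to signaled, so a fresh one must be created with the same
 * context and mm.
 *
 *      struct amdgpu_amdkfd_fence *new_fence;
 *
 *      new_fence = amdgpu_amdkfd_fence_create(old_fence->base.context,
 *                                             old_fence->mm);
 *      if (!new_fence)
 *              return -ENOMEM;
 *      dma_fence_put(&old_fence->base);
 *      process_info->eviction_fence = new_fence;
 *      *ef = dma_fence_get(&new_fence->base);
 */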
2096
2097 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2098 {
2099         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2100         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2101         int ret;
2102
2103         if (!info || !gws)
2104                 return -EINVAL;
2105
2106         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2107         if (!*mem)
2108                 return -ENOMEM;
2109
2110         mutex_init(&(*mem)->lock);
2111         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2112         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2113         (*mem)->process_info = process_info;
2114         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2115         amdgpu_sync_create(&(*mem)->sync);
2116
2117
2118         /* Validate gws bo the first time it is added to process */
2119         mutex_lock(&(*mem)->process_info->lock);
2120         ret = amdgpu_bo_reserve(gws_bo, false);
2121         if (unlikely(ret)) {
2122                 pr_err("Reserve gws bo failed %d\n", ret);
2123                 goto bo_reservation_failure;
2124         }
2125
2126         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2127         if (ret) {
2128                 pr_err("GWS BO validate failed %d\n", ret);
2129                 goto bo_validation_failure;
2130         }
2131         /* The GWS resource is shared between amdgpu and amdkfd.
2132          * Add the process eviction fence to the BO so that either
2133          * side can evict the other.
2134          */
2135         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2136         if (ret)
2137                 goto reserve_shared_fail;
2138         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2139         amdgpu_bo_unreserve(gws_bo);
2140         mutex_unlock(&(*mem)->process_info->lock);
2141
2142         return ret;
2143
2144 reserve_shared_fail:
2145 bo_validation_failure:
2146         amdgpu_bo_unreserve(gws_bo);
2147 bo_reservation_failure:
2148         mutex_unlock(&(*mem)->process_info->lock);
2149         amdgpu_sync_free(&(*mem)->sync);
2150         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2151         amdgpu_bo_unref(&gws_bo);
2152         mutex_destroy(&(*mem)->lock);
2153         kfree(*mem);
2154         *mem = NULL;
2155         return ret;
2156 }
2157
2158 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2159 {
2160         int ret;
2161         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2162         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2163         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2164
2165         /* Remove BO from process's validate list so restore worker won't touch
2166          * it anymore
2167          */
2168         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2169
2170         ret = amdgpu_bo_reserve(gws_bo, false);
2171         if (unlikely(ret)) {
2172                 pr_err("Reserve gws bo failed %d\n", ret);
2173                 //TODO add BO back to validate_list?
2174                 return ret;
2175         }
2176         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2177                         process_info->eviction_fence);
2178         amdgpu_bo_unreserve(gws_bo);
2179         amdgpu_sync_free(&kgd_mem->sync);
2180         amdgpu_bo_unref(&gws_bo);
2181         mutex_destroy(&kgd_mem->lock);
2182         kfree(mem);
2183         return 0;
2184 }