// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

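/*
 * Number of kernel base pages that make up one 2MB device page; with the
 * common 4KB PAGE_SIZE this evaluates to 512.
 */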
#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG    0

/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
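
/*
 * Illustration only (hypothetical numbers): if the list holds a single chunk
 * covering 0x1000000 - 0x1ffffff and a 2MB mapping is requested, the chunk
 * is split into the returned range 0x1000000 - 0x11fffff and the remainder
 * 0x1200000 - 0x1ffffff, which stays in the list. When the returned range is
 * later unmapped it is re-inserted, and since its end touches the start of
 * the remaining chunk, the two are merged back into the original chunk.
 */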

/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx                 : current context
 * @args                : host parameters containing the requested size
 * @ret_handle          : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                                u32 *ret_handle)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        u64 paddr = 0, total_size, num_pgs, i;
        u32 num_curr_pgs, page_size, page_shift;
        int handle, rc;
        bool contiguous;

        num_curr_pgs = 0;
        page_size = hdev->asic_prop.dram_page_size;
        page_shift = __ffs(page_size);
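        /*
         * Round the requested size up to a whole number of DRAM pages;
         * e.g. with a 2MB page size, a 5MB request gives num_pgs = 3 and
         * total_size = 6MB (illustrative numbers only).
         */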
        num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
        total_size = num_pgs << page_shift;

        contiguous = args->flags & HL_MEM_CONTIGUOUS;

        if (contiguous) {
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
                                "failed to allocate %llu huge contiguous pages\n",
                                num_pgs);
                        return -ENOMEM;
                }
        }

        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
        if (!phys_pg_pack) {
                rc = -ENOMEM;
                goto pages_pack_err;
        }

        phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
        phys_pg_pack->asid = ctx->asid;
        phys_pg_pack->npages = num_pgs;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_size;
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;

        phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto pages_arr_err;
        }

        if (phys_pg_pack->contiguous) {
                for (i = 0 ; i < num_pgs ; i++)
                        phys_pg_pack->pages[i] = paddr + i * page_size;
        } else {
                for (i = 0 ; i < num_pgs ; i++) {
                        phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
                                                        vm->dram_pg_pool,
                                                        page_size);
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
                                        "Failed to allocate device memory (out of memory)\n");
                                rc = -ENOMEM;
                                goto page_err;
                        }

                        num_curr_pgs++;
                }
        }

        spin_lock(&vm->idr_lock);
        handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
                                GFP_ATOMIC);
        spin_unlock(&vm->idr_lock);

        if (handle < 0) {
                dev_err(hdev->dev, "Failed to get handle for page\n");
                rc = -EFAULT;
                goto idr_err;
        }

        for (i = 0 ; i < num_pgs ; i++)
                kref_get(&vm->dram_pg_pool_refcount);

        phys_pg_pack->handle = handle;

        atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
        atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

        *ret_handle = handle;

        return 0;

idr_err:
page_err:
        if (!phys_pg_pack->contiguous)
                for (i = 0 ; i < num_curr_pgs ; i++)
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);

        kvfree(phys_pg_pack->pages);
pages_arr_err:
        kfree(phys_pg_pack);
pages_pack_err:
        if (contiguous)
                gen_pool_free(vm->dram_pg_pool, paddr, total_size);

        return rc;
}

/*
 * get_userptr_from_host_va - initialize userptr structure from given host
 *                            virtual address
 *
 * @hdev                : habanalabs device structure
 * @args                : parameters containing the virtual address and size
 * @p_userptr           : pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int get_userptr_from_host_va(struct hl_device *hdev,
                struct hl_mem_in *args, struct hl_userptr **p_userptr)
{
        struct hl_userptr *userptr;
        int rc;

        userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
        if (!userptr) {
                rc = -ENOMEM;
                goto userptr_err;
        }

        rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
                        args->map_host.mem_size, userptr);
        if (rc) {
                dev_err(hdev->dev, "Failed to pin host memory\n");
                goto pin_err;
        }

        rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
                                        userptr->sgt->nents, DMA_BIDIRECTIONAL);
        if (rc) {
                dev_err(hdev->dev, "failed to map sgt with DMA region\n");
                goto dma_map_err;
        }

        userptr->dma_mapped = true;
        userptr->dir = DMA_BIDIRECTIONAL;
        userptr->vm_type = VM_TYPE_USERPTR;

        *p_userptr = userptr;

        return 0;

dma_map_err:
        hl_unpin_host_memory(hdev, userptr);
pin_err:
        kfree(userptr);
userptr_err:
        return rc;
}

/*
 * free_userptr - free userptr structure
 *
 * @hdev                : habanalabs device structure
 * @userptr             : userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
        hl_unpin_host_memory(hdev, userptr);
        kfree(userptr);
}

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref                 : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
        struct hl_vm *vm = container_of(ref, struct hl_vm,
                        dram_pg_pool_refcount);

        /*
         * free the idr here as only here we know for sure that there are no
         * allocated physical pages and hence there are no handles in use
         */
        idr_destroy(&vm->phys_pg_pack_handles);
        gen_pool_destroy(vm->dram_pg_pool);
}

/*
 * free_phys_pg_pack - free physical page pack
 *
 * @hdev               : habanalabs device structure
 * @phys_pg_pack       : physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
{
        struct hl_vm *vm = &hdev->vm;
        u64 i;

        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
                                        phys_pg_pack->total_size);

                        for (i = 0; i < phys_pg_pack->npages ; i++)
                                kref_put(&vm->dram_pg_pool_refcount,
                                        dram_pg_pool_do_release);
                } else {
                        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                                gen_pool_free(vm->dram_pg_pool,
                                                phys_pg_pack->pages[i],
                                                phys_pg_pack->page_size);
                                kref_put(&vm->dram_pg_pool_refcount,
                                        dram_pg_pool_do_release);
                        }
                }
        }

        kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
}

/*
 * free_device_memory - free device memory
 *
 * @ctx                 : current context
 * @handle              : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;

        spin_lock(&vm->idr_lock);
        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (phys_pg_pack) {
                if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
                        dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
                                handle);
                        spin_unlock(&vm->idr_lock);
                        return -EINVAL;
                }

                /*
                 * must remove from idr before the freeing of the physical
                 * pages as the refcount of the pool is also the trigger of the
                 * idr destroy
                 */
                idr_remove(&vm->phys_pg_pack_handles, handle);
                spin_unlock(&vm->idr_lock);

                atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
                atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

                free_phys_pg_pack(hdev, phys_pg_pack);
        } else {
                spin_unlock(&vm->idr_lock);
                dev_err(hdev->dev,
                        "free device memory failed, no match for handle %u\n",
                        handle);
                return -EINVAL;
        }

        return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
                struct list_head *va_list)
{
        struct hl_vm_va_block *va_block, *tmp;

        list_for_each_entry_safe(va_block, tmp, va_list, node) {
                list_del(&va_block->node);
                kfree(va_block);
        }
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
                struct list_head *va_list)
{
#if HL_MMU_DEBUG
        struct hl_vm_va_block *va_block;

        dev_dbg(hdev->dev, "print va list:\n");

        list_for_each_entry(va_block, va_list, node)
                dev_dbg(hdev->dev,
                        "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
                        va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @va_block            : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given blocks with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
                struct list_head *va_list, struct hl_vm_va_block *va_block)
{
        struct hl_vm_va_block *prev, *next;

        prev = list_prev_entry(va_block, node);
        if (&prev->node != va_list && prev->end + 1 == va_block->start) {
                prev->end = va_block->end;
                prev->size = prev->end - prev->start;
                list_del(&va_block->node);
                kfree(va_block);
                va_block = prev;
        }

        next = list_next_entry(va_block, node);
        if (&next->node != va_list && va_block->end + 1 == next->start) {
                next->start = va_block->start;
                next->size = next->end - next->start;
                list_del(&va_block->node);
                kfree(va_block);
        }
}

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
                struct list_head *va_list, u64 start, u64 end)
{
        struct hl_vm_va_block *va_block, *res = NULL;
        u64 size = end - start;

        print_va_list_locked(hdev, va_list);

        list_for_each_entry(va_block, va_list, node) {
                /* TODO: remove this check once the code matures */
                if (hl_mem_area_crosses_range(start, size, va_block->start,
                                va_block->end)) {
                        dev_err(hdev->dev,
                                "block crossing ranges at start 0x%llx, end 0x%llx\n",
                                va_block->start, va_block->end);
                        return -EINVAL;
                }

                if (va_block->end < start)
                        res = va_block;
        }

        va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
        if (!va_block)
                return -ENOMEM;

        va_block->start = start;
        va_block->end = end;
        va_block->size = size;

        if (!res)
                list_add(&va_block->node, va_list);
        else
                list_add(&va_block->node, &res->node);

        merge_va_blocks_locked(hdev, va_list, va_block);

        print_va_list_locked(hdev, va_list);

        return 0;
}

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the virtual addresses range
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
                struct hl_va_range *va_range, u64 start, u64 end)
{
        int rc;

        mutex_lock(&va_range->lock);
        rc = add_va_block_locked(hdev, &va_range->list, start, end);
        mutex_unlock(&va_range->lock);

        return rc;
}

/*
 * get_va_block - get a virtual block with the requested size
 *
 * @hdev            : pointer to the habanalabs device structure
 * @va_range        : pointer to the virtual addresses range
 * @size            : requested block size
 * @hint_addr       : hint address requested by the user
 * @is_userptr      : true for host memory, false for DRAM memory
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   requested size
 * - Reserve the requested block and update the list
 * - Return the start address of the virtual block
 */
static u64 get_va_block(struct hl_device *hdev,
                struct hl_va_range *va_range, u64 size, u64 hint_addr,
                bool is_userptr)
{
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
        u64 valid_start, valid_size, prev_start, prev_end, page_mask,
                res_valid_start = 0, res_valid_size = 0;
        u32 page_size;
        bool add_prev = false;

        if (is_userptr) {
                /*
                 * We cannot know if the user allocated memory with huge pages
                 * or not, hence we continue with the biggest possible
                 * granularity.
                 */
                page_size = PAGE_SIZE_2MB;
                page_mask = PAGE_MASK_2MB;
        } else {
                page_size = hdev->asic_prop.dram_page_size;
                page_mask = ~((u64)page_size - 1);
        }

        mutex_lock(&va_range->lock);

        print_va_list_locked(hdev, &va_range->list);

        list_for_each_entry(va_block, &va_range->list, node) {
                /* calc the first possible aligned addr */
                valid_start = va_block->start;

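                /*
                 * Round the start up to the next aligned address if needed;
                 * e.g. (hypothetical values) a block starting at 0x10001000
                 * with a 2MB page size yields valid_start = 0x10200000.
                 */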
                if (valid_start & (page_size - 1)) {
                        valid_start &= page_mask;
                        valid_start += page_size;
                        if (valid_start > va_block->end)
                                continue;
                }

                valid_size = va_block->end - valid_start;

                if (valid_size >= size &&
                        (!new_va_block || valid_size < res_valid_size)) {

                        new_va_block = va_block;
                        res_valid_start = valid_start;
                        res_valid_size = valid_size;
                }

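                /*
                 * A user-supplied hint address that falls entirely inside
                 * this block takes precedence over the best-fit candidate.
                 */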
                if (hint_addr && hint_addr >= valid_start &&
                                ((hint_addr + size) <= va_block->end)) {
                        new_va_block = va_block;
                        res_valid_start = hint_addr;
                        res_valid_size = valid_size;
                        break;
                }
        }

        if (!new_va_block) {
                dev_err(hdev->dev, "no available va block for size %llu\n",
                                size);
                goto out;
        }

        if (res_valid_start > new_va_block->start) {
                prev_start = new_va_block->start;
                prev_end = res_valid_start - 1;

                new_va_block->start = res_valid_start;
                new_va_block->size = res_valid_size;

                add_prev = true;
        }

        if (new_va_block->size > size) {
                new_va_block->start += size;
                new_va_block->size = new_va_block->end - new_va_block->start;
        } else {
                list_del(&new_va_block->node);
                kfree(new_va_block);
        }

        if (add_prev)
                add_va_block_locked(hdev, &va_range->list, prev_start,
                                prev_end);

        print_va_list_locked(hdev, &va_range->list);
out:
        mutex_unlock(&va_range->lock);

        return res_valid_start;
}

/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg                 : the SG list
 * @dma_addr           : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address in the first page, add to it the length and round it up
 * to the number of needed pages.
 */
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
        *dma_addr = sg_dma_address(sg);

        return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
                        (PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
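
/*
 * Illustration only, assuming 4KB kernel pages: a DMA address with an
 * in-page offset of 0x234 and a length of 0x3000 bytes spans
 * (0x234 + 0x3000 + 0xfff) >> 12 = 4 pages.
 */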

/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 *
 * @ctx                : current context
 * @userptr            : userptr to initialize from
 * @pphys_pg_pack      : result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
                struct hl_userptr *userptr,
                struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
        u64 page_mask, total_npages;
        u32 npages, page_size = PAGE_SIZE;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;

        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
        if (!phys_pg_pack)
                return -ENOMEM;

        phys_pg_pack->vm_type = userptr->vm_type;
        phys_pg_pack->created_from_userptr = true;
        phys_pg_pack->asid = ctx->asid;
        atomic_set(&phys_pg_pack->mapping_cnt, 1);

        /* Only if all dma_addrs are aligned to 2MB and their
         * sizes are a multiple of 2MB, we can use huge page mapping.
         * We limit the 2MB optimization to this condition,
         * since later on we acquire the related VA range as one
         * consecutive block.
         */
        total_npages = 0;
        for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
                npages = get_sg_info(sg, &dma_addr);

                total_npages += npages;

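                /*
                 * The first chunk is aligned down to a 2MB boundary before
                 * the check below; its in-page offset is preserved and
                 * restored later via phys_pg_pack->offset.
                 */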
                if (first) {
                        first = false;
                        dma_addr &= PAGE_MASK_2MB;
                }

                if ((npages % PGS_IN_2MB_PAGE) ||
                                        (dma_addr & (PAGE_SIZE_2MB - 1)))
                        is_huge_page_opt = false;
        }

        if (is_huge_page_opt) {
                page_size = PAGE_SIZE_2MB;
                total_npages /= PGS_IN_2MB_PAGE;
        }

        page_mask = ~(((u64) page_size) - 1);

        phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
                                                GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
        }

        phys_pg_pack->npages = total_npages;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_npages * page_size;

        j = 0;
        first = true;
        for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
                npages = get_sg_info(sg, &dma_addr);

                /* align down to physical page size and save the offset */
                if (first) {
                        first = false;
                        phys_pg_pack->offset = dma_addr & (page_size - 1);
                        dma_addr &= page_mask;
                }

                while (npages) {
                        phys_pg_pack->pages[j++] = dma_addr;
                        dma_addr += page_size;

                        if (is_huge_page_opt)
                                npages -= PGS_IN_2MB_PAGE;
                        else
                                npages--;
                }
        }

        *pphys_pg_pack = phys_pg_pack;

        return 0;

page_pack_arr_mem_err:
        kfree(phys_pg_pack);

        return rc;
}

/*
 * map_phys_page_pack - maps the physical page pack
 *
 * @ctx                : current context
 * @vaddr              : start address of the virtual area to map from
 * @phys_pg_pack       : the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to its matching physical chunk
 * - Returns 0 on success, error code otherwise. On failure, unmaps the pages
 *   that were already mapped
 */
static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
{
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
        int rc = 0;

        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];

                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
                if (rc) {
                        dev_err(hdev->dev,
                                "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
                }

                mapped_pg_cnt++;
                next_vaddr += page_size;
        }

        return 0;

err:
        next_vaddr = vaddr;
        for (i = 0 ; i < mapped_pg_cnt ; i++) {
                if (hl_mmu_unmap(ctx, next_vaddr, page_size))
                        dev_warn_ratelimited(hdev->dev,
                                "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
                                        phys_pg_pack->handle, next_vaddr,
                                        phys_pg_pack->pages[i], page_size);

                next_vaddr += page_size;
        }

        return rc;
}

/*
 * get_paddr_from_handle - get the DRAM physical address of an allocation
 *
 * @ctx                 : current context
 * @args                : host parameters containing the device handle
 * @paddr               : pointer to result physical address
 *
 * This function does the following:
 * - Look up the allocation handle and return the physical address of its
 *   first page
 */
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
                                u64 *paddr)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        u32 handle;

        handle = lower_32_bits(args->map_device.handle);
        spin_lock(&vm->idr_lock);
        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (!phys_pg_pack) {
                spin_unlock(&vm->idr_lock);
                dev_err(hdev->dev, "no match for handle %u\n", handle);
                return -EINVAL;
        }

        *paddr = phys_pg_pack->pages[0];

        spin_unlock(&vm->idr_lock);

        return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx          : current context
 * @args         : host parameters with handle/host virtual address
 * @device_addr  : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
                u64 *device_addr)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct hl_userptr *userptr = NULL;
        struct hl_vm_hash_node *hnode;
        enum vm_type_t *vm_type;
        u64 ret_vaddr, hint_addr;
        u32 handle = 0;
        int rc;
        bool is_userptr = args->flags & HL_MEM_USERPTR;

        /* Assume failure */
        *device_addr = 0;

        if (is_userptr) {
                rc = get_userptr_from_host_va(hdev, args, &userptr);
                if (rc) {
                        dev_err(hdev->dev, "failed to get userptr from va\n");
                        return rc;
                }

                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                &phys_pg_pack);
                if (rc) {
                        dev_err(hdev->dev,
                                "unable to init page pack for vaddr 0x%llx\n",
                                args->map_host.host_virt_addr);
                        goto init_page_pack_err;
                }

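                /*
                 * vm_type is the first member of struct hl_userptr, so the
                 * structure address doubles as a type tag that
                 * unmap_device_va() can inspect.
                 */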
                vm_type = (enum vm_type_t *) userptr;
                hint_addr = args->map_host.hint_addr;
        } else {
                handle = lower_32_bits(args->map_device.handle);

                spin_lock(&vm->idr_lock);
                phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
                if (!phys_pg_pack) {
                        spin_unlock(&vm->idr_lock);
                        dev_err(hdev->dev,
                                "no match for handle %u\n", handle);
                        return -EINVAL;
                }

                /* increment now to avoid freeing device memory while mapping */
                atomic_inc(&phys_pg_pack->mapping_cnt);

                spin_unlock(&vm->idr_lock);

                vm_type = (enum vm_type_t *) phys_pg_pack;

                hint_addr = args->map_device.hint_addr;
        }

        /*
         * relevant for mapping device physical memory only, as host memory is
         * implicitly shared
         */
        if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
                        phys_pg_pack->asid != ctx->asid) {
                dev_err(hdev->dev,
                        "Failed to map memory, handle %u is not shared\n",
                        handle);
                rc = -EPERM;
                goto shared_err;
        }

        hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
        if (!hnode) {
                rc = -ENOMEM;
                goto hnode_err;
        }

        ret_vaddr = get_va_block(hdev,
                        is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
                        phys_pg_pack->total_size, hint_addr, is_userptr);
        if (!ret_vaddr) {
                dev_err(hdev->dev, "no available va block for handle %u\n",
                                handle);
                rc = -ENOMEM;
                goto va_block_err;
        }

        mutex_lock(&ctx->mmu_lock);

        rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
        if (rc) {
                mutex_unlock(&ctx->mmu_lock);
                dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
                                handle);
                goto map_err;
        }

        hdev->asic_funcs->mmu_invalidate_cache(hdev, false);

        mutex_unlock(&ctx->mmu_lock);

        ret_vaddr += phys_pg_pack->offset;

        hnode->ptr = vm_type;
        hnode->vaddr = ret_vaddr;

        mutex_lock(&ctx->mem_hash_lock);
        hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
        mutex_unlock(&ctx->mem_hash_lock);

        *device_addr = ret_vaddr;

        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);

        return 0;

map_err:
        if (add_va_block(hdev,
                        is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
                        ret_vaddr,
                        ret_vaddr + phys_pg_pack->total_size - 1))
                dev_warn(hdev->dev,
                        "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
                                handle, ret_vaddr);

va_block_err:
        kfree(hnode);
hnode_err:
shared_err:
        atomic_dec(&phys_pg_pack->mapping_cnt);
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
        if (is_userptr)
                free_userptr(hdev, userptr);

        return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx                 : current context
 * @vaddr               : device virtual address to unmap
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        enum vm_type_t *vm_type;
        u64 next_vaddr, i;
        u32 page_size;
        bool is_userptr;
        int rc;

        /* protect against a concurrent unmap of the same address */
        mutex_lock(&ctx->mem_hash_lock);
        hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
                if (vaddr == hnode->vaddr)
                        break;

        if (!hnode) {
                mutex_unlock(&ctx->mem_hash_lock);
                dev_err(hdev->dev,
                        "unmap failed, no mem hnode for vaddr 0x%llx\n",
                        vaddr);
                return -EINVAL;
        }

        hash_del(&hnode->node);
        mutex_unlock(&ctx->mem_hash_lock);

        vm_type = hnode->ptr;

        if (*vm_type == VM_TYPE_USERPTR) {
                is_userptr = true;
                userptr = hnode->ptr;
                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                &phys_pg_pack);
                if (rc) {
                        dev_err(hdev->dev,
                                "unable to init page pack for vaddr 0x%llx\n",
                                vaddr);
                        goto vm_type_err;
                }
        } else if (*vm_type == VM_TYPE_PHYS_PACK) {
                is_userptr = false;
                phys_pg_pack = hnode->ptr;
        } else {
                dev_warn(hdev->dev,
                        "unmap failed, unknown vm desc for vaddr 0x%llx\n",
                                vaddr);
                rc = -EFAULT;
                goto vm_type_err;
        }

        if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
                dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
                rc = -EINVAL;
                goto mapping_cnt_err;
        }

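        /*
         * map_device_va() returned vaddr plus the in-page offset, so mask
         * the offset off to get back to the page-aligned mapping address.
         */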
        page_size = phys_pg_pack->page_size;
        vaddr &= ~(((u64) page_size) - 1);

        next_vaddr = vaddr;

        mutex_lock(&ctx->mmu_lock);

        for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
                if (hl_mmu_unmap(ctx, next_vaddr, page_size))
                        dev_warn_ratelimited(hdev->dev,
                        "unmap failed for vaddr: 0x%llx\n", next_vaddr);

                /* unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
                 */
                if (hdev->pldm)
                        usleep_range(500, 1000);
        }

        hdev->asic_funcs->mmu_invalidate_cache(hdev, true);

        mutex_unlock(&ctx->mmu_lock);

        if (add_va_block(hdev,
                        is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
                        vaddr,
                        vaddr + phys_pg_pack->total_size - 1))
                dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
                                vaddr);

        atomic_dec(&phys_pg_pack->mapping_cnt);
        kfree(hnode);

        if (is_userptr) {
                free_phys_pg_pack(hdev, phys_pg_pack);
                free_userptr(hdev, userptr);
        }

        return 0;

mapping_cnt_err:
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
        mutex_lock(&ctx->mem_hash_lock);
        hash_add(ctx->mem_hash, &hnode->node, vaddr);
        mutex_unlock(&ctx->mem_hash_lock);

        return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
        struct hl_device *hdev = hpriv->hdev;
        struct hl_ctx *ctx = hpriv->ctx;
        u64 device_addr = 0;
        u32 handle = 0;
        int rc;

        switch (args->in.op) {
        case HL_MEM_OP_ALLOC:
                if (args->in.alloc.mem_size == 0) {
                        dev_err(hdev->dev,
                                "alloc size must be larger than 0\n");
                        rc = -EINVAL;
                        goto out;
                }

                /* Force contiguous as there are no real MMU
                 * translations to overcome physical memory gaps
                 */
                args->in.flags |= HL_MEM_CONTIGUOUS;
                rc = alloc_device_memory(ctx, &args->in, &handle);

                memset(args, 0, sizeof(*args));
                args->out.handle = (__u64) handle;
                break;

        case HL_MEM_OP_FREE:
                rc = free_device_memory(ctx, args->in.free.handle);
                break;

        case HL_MEM_OP_MAP:
                if (args->in.flags & HL_MEM_USERPTR) {
                        device_addr = args->in.map_host.host_virt_addr;
                        rc = 0;
                } else {
                        rc = get_paddr_from_handle(ctx, &args->in,
                                        &device_addr);
                }

                memset(args, 0, sizeof(*args));
                args->out.device_virt_addr = device_addr;
                break;

        case HL_MEM_OP_UNMAP:
                rc = 0;
                break;

        default:
                dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
                rc = -ENOTTY;
                break;
        }

out:
        return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
        union hl_mem_args *args = data;
        struct hl_device *hdev = hpriv->hdev;
        struct hl_ctx *ctx = hpriv->ctx;
        u64 device_addr = 0;
        u32 handle = 0;
        int rc;

        if (hl_device_disabled_or_in_reset(hdev)) {
                dev_warn_ratelimited(hdev->dev,
                        "Device is %s. Can't execute MEMORY IOCTL\n",
                        atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
                return -EBUSY;
        }

        if (!hdev->mmu_enable)
                return mem_ioctl_no_mmu(hpriv, args);

        switch (args->in.op) {
        case HL_MEM_OP_ALLOC:
                if (!hdev->dram_supports_virtual_memory) {
                        dev_err(hdev->dev, "DRAM alloc is not supported\n");
                        rc = -EINVAL;
                        goto out;
                }

                if (args->in.alloc.mem_size == 0) {
                        dev_err(hdev->dev,
                                "alloc size must be larger than 0\n");
                        rc = -EINVAL;
                        goto out;
                }
                rc = alloc_device_memory(ctx, &args->in, &handle);

                memset(args, 0, sizeof(*args));
                args->out.handle = (__u64) handle;
                break;

        case HL_MEM_OP_FREE:
                rc = free_device_memory(ctx, args->in.free.handle);
                break;

        case HL_MEM_OP_MAP:
                rc = map_device_va(ctx, &args->in, &device_addr);

                memset(args, 0, sizeof(*args));
                args->out.device_virt_addr = device_addr;
                break;

        case HL_MEM_OP_UNMAP:
                rc = unmap_device_va(ctx,
                                args->in.unmap.device_virt_addr);
                break;

        default:
                dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
                rc = -ENOTTY;
                break;
        }

out:
        return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory
 *
 * @hdev                : pointer to the habanalabs device structure
 * @addr                : the user-space virtual address of the memory area
 * @size                : the size of the memory area
 * @userptr             : pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Creates an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
                        struct hl_userptr *userptr)
{
        u64 start, end;
        u32 npages, offset;
        int rc;

        if (!size) {
                dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
                return -EINVAL;
        }

        if (!access_ok((void __user *) (uintptr_t) addr, size)) {
                dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
                return -EFAULT;
        }

        /*
         * If the combination of the address and size requested for this memory
         * region causes an integer overflow, return error.
         */
        if (((addr + size) < addr) ||
                        PAGE_ALIGN(addr + size) < (addr + size)) {
                dev_err(hdev->dev,
                        "user pointer 0x%llx + %llu causes integer overflow\n",
                        addr, size);
                return -EINVAL;
        }

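        /*
         * Illustration only, assuming 4KB kernel pages: addr = 0x1234 with
         * size = 0x3000 gives start = 0x1000, offset = 0x234, end = 0x5000
         * and npages = 4.
         */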
        start = addr & PAGE_MASK;
        offset = addr & ~PAGE_MASK;
        end = PAGE_ALIGN(addr + size);
        npages = (end - start) >> PAGE_SHIFT;

        userptr->size = size;
        userptr->addr = addr;
        userptr->dma_mapped = false;
        INIT_LIST_HEAD(&userptr->job_node);

        userptr->vec = frame_vector_create(npages);
        if (!userptr->vec) {
                dev_err(hdev->dev, "Failed to create frame vector\n");
                return -ENOMEM;
        }

        rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
                                userptr->vec);

        if (rc != npages) {
                dev_err(hdev->dev,
                        "Failed to map host memory, user ptr probably wrong\n");
                if (rc < 0)
                        goto destroy_framevec;
                rc = -EFAULT;
                goto put_framevec;
        }

        if (frame_vector_to_pages(userptr->vec) < 0) {
                dev_err(hdev->dev,
                        "Failed to translate frame vector to pages\n");
                rc = -EFAULT;
                goto put_framevec;
        }

        userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
        if (!userptr->sgt) {
                rc = -ENOMEM;
                goto put_framevec;
        }

        rc = sg_alloc_table_from_pages(userptr->sgt,
                                        frame_vector_pages(userptr->vec),
                                        npages, offset, size, GFP_ATOMIC);
        if (rc < 0) {
                dev_err(hdev->dev, "failed to create SG table from pages\n");
                goto free_sgt;
        }

        hl_debugfs_add_userptr(hdev, userptr);

        return 0;

free_sgt:
        kfree(userptr->sgt);
put_framevec:
        put_vaddr_frames(userptr->vec);
destroy_framevec:
        frame_vector_destroy(userptr->vec);
        return rc;
}

/*
 * hl_unpin_host_memory - unpins a chunk of host memory
 *
 * @hdev                : pointer to the habanalabs device structure
 * @userptr             : pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Frees the SG list
 */
int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
        struct page **pages;

        hl_debugfs_remove_userptr(hdev, userptr);

        if (userptr->dma_mapped)
                hdev->asic_funcs->hl_dma_unmap_sg(hdev,
                                userptr->sgt->sgl,
                                userptr->sgt->nents,
                                userptr->dir);

        pages = frame_vector_pages(userptr->vec);
        if (!IS_ERR(pages)) {
                int i;

                for (i = 0; i < frame_vector_count(userptr->vec); i++)
                        set_page_dirty_lock(pages[i]);
        }
        put_vaddr_frames(userptr->vec);
        frame_vector_destroy(userptr->vec);

        list_del(&userptr->job_node);

        sg_free_table(userptr->sgt);
        kfree(userptr->sgt);

        return 0;
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @userptr_list        : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list and unpins the host memory and frees the userptr
 *   structure.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
                                struct list_head *userptr_list)
{
        struct hl_userptr *userptr, *tmp;

        list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
                hl_unpin_host_memory(hdev, userptr);
                kfree(userptr);
        }

        INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev                : pointer to the habanalabs device structure
 * @addr                : user-space virtual address to look for
 * @size                : size of the memory area
 * @userptr_list        : pointer to the list to search in
 * @userptr             : pointer to the found userptr structure
 *
 * This function does the following:
 * - Iterates over the list and checks whether an entry with the given address
 *   and size exists in it, meaning the memory is pinned. If so, returns true
 *   and the matching userptr, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
                                u32 size, struct list_head *userptr_list,
                                struct hl_userptr **userptr)
{
        list_for_each_entry((*userptr), userptr_list, job_node) {
                if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
                        return true;
        }

        return false;
}

/*
 * hl_va_range_init - initialize virtual addresses range
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the range to initialize
 * @start               : range start address
 * @end                 : range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int hl_va_range_init(struct hl_device *hdev,
                struct hl_va_range *va_range, u64 start, u64 end)
{
        int rc;

        INIT_LIST_HEAD(&va_range->list);

        /* PAGE_SIZE alignment */

        if (start & (PAGE_SIZE - 1)) {
                start &= PAGE_MASK;
                start += PAGE_SIZE;
        }

        if (end & (PAGE_SIZE - 1))
                end &= PAGE_MASK;

        if (start >= end) {
                dev_err(hdev->dev, "too small vm range for va list\n");
                return -EFAULT;
        }

        rc = add_va_block(hdev, va_range, start, end);

        if (rc) {
                dev_err(hdev->dev, "Failed to init va list\n");
                return rc;
        }

        va_range->start_addr = start;
        va_range->end_addr = end;

        return 0;
}

/*
 * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
 *
 * @ctx                 : pointer to the habanalabs context structure
 * @host_range_start    : host virtual addresses range start
 * @host_range_end      : host virtual addresses range end
 * @dram_range_start    : dram virtual addresses range start
 * @dram_range_end      : dram virtual addresses range end
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
                                u64 host_range_end, u64 dram_range_start,
                                u64 dram_range_end)
{
        struct hl_device *hdev = ctx->hdev;
        int rc;

        rc = hl_mmu_ctx_init(ctx);
        if (rc) {
                dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
                return rc;
        }

        mutex_init(&ctx->mem_hash_lock);
        hash_init(ctx->mem_hash);

        mutex_init(&ctx->host_va_range.lock);

        rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
                        host_range_end);
        if (rc) {
                dev_err(hdev->dev, "failed to init host vm range\n");
                goto host_vm_err;
        }

        mutex_init(&ctx->dram_va_range.lock);

        rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
                        dram_range_end);
        if (rc) {
                dev_err(hdev->dev, "failed to init dram vm range\n");
                goto dram_vm_err;
        }

        hl_debugfs_add_ctx_mem_hash(hdev, ctx);

        return 0;

dram_vm_err:
        mutex_destroy(&ctx->dram_va_range.lock);

        mutex_lock(&ctx->host_va_range.lock);
        clear_va_list_locked(hdev, &ctx->host_va_range.list);
        mutex_unlock(&ctx->host_va_range.lock);
host_vm_err:
        mutex_destroy(&ctx->host_va_range.lock);
        mutex_destroy(&ctx->mem_hash_lock);
        hl_mmu_ctx_fini(ctx);

        return rc;
}
1513
1514 int hl_vm_ctx_init(struct hl_ctx *ctx)
1515 {
1516         struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1517         u64 host_range_start, host_range_end, dram_range_start,
1518                 dram_range_end;
1519
1520         atomic64_set(&ctx->dram_phys_mem, 0);
1521
1522         /*
1523          * - If MMU is enabled, init the ranges as usual.
1524          * - If MMU is disabled, in case of host mapping, the returned address
1525          *   is the given one.
1526          *   In case of DRAM mapping, the returned address is the physical
1527          *   address of the memory related to the given handle.
1528          */
1529         if (ctx->hdev->mmu_enable) {
1530                 dram_range_start = prop->va_space_dram_start_address;
1531                 dram_range_end = prop->va_space_dram_end_address;
1532                 host_range_start = prop->va_space_host_start_address;
1533                 host_range_end = prop->va_space_host_end_address;
1534         } else {
1535                 dram_range_start = prop->dram_user_base_address;
1536                 dram_range_end = prop->dram_end_address;
1537                 host_range_start = prop->dram_user_base_address;
1538                 host_range_end = prop->dram_end_address;
1539         }
1540
1541         return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1542                         dram_range_start, dram_range_end);
1543 }
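
/*
 * Editorial illustration (property values hypothetical): with the MMU
 * disabled, both "virtual" windows alias the physical DRAM user window.
 * For example, given
 *
 *   prop->dram_user_base_address = 0x20000000
 *   prop->dram_end_address       = 0x100000000
 *
 * both the host and DRAM ranges become [0x20000000, 0x100000000), so a
 * map request effectively hands back a physical DRAM address.
 */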
1544
1545 /*
1546  * hl_va_range_fini     - clear a virtual addresses range
1547  *
1548  * @hdev                : pointer to the habanalabs device structure
1549  * @va_range            : pointer to virtual addresses range
1550  *
1551  * This function does the following:
1552  * - Checks that the block list still covers the whole initial range
1553  * - Frees the virtual addresses block list and its lock
1554  */
1555 static void hl_va_range_fini(struct hl_device *hdev,
1556                 struct hl_va_range *va_range)
1557 {
1558         struct hl_vm_va_block *va_block;
1559
1560         if (list_empty(&va_range->list)) {
1561                 dev_warn(hdev->dev,
1562                                 "va list should not be empty on cleanup!\n");
1563                 goto out;
1564         }
1565
1566         if (!list_is_singular(&va_range->list)) {
1567                 dev_warn(hdev->dev,
1568                         "va list should not contain multiple blocks on cleanup!\n");
1569                 goto free_va_list;
1570         }
1571
1572         va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
1573
1574         if (va_block->start != va_range->start_addr ||
1575                 va_block->end != va_range->end_addr) {
1576                 dev_warn(hdev->dev,
1577                         "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
1578                                 va_block->start, va_block->end);
1579                 goto free_va_list;
1580         }
1581
1582 free_va_list:
1583         mutex_lock(&va_range->lock);
1584         clear_va_list_locked(hdev, &va_range->list);
1585         mutex_unlock(&va_range->lock);
1586
1587 out:
1588         mutex_destroy(&va_range->lock);
1589 }
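
/*
 * Editorial illustration (addresses hypothetical): the singular-block
 * check above relies on freed chunks being merged back with their
 * neighbors by add_va_block(). For a range initialized as
 * [0x1000, 0x8fff]:
 *
 *   after carving out a block:  { [0x3000, 0x8fff] }
 *   after returning it:         { [0x1000, 0x8fff] }   (edges touch, merge)
 *
 * A leaked mapping leaves extra blocks behind and trips the dev_warn()
 * paths above.
 */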
1590
1591 /*
1592  * hl_vm_ctx_fini       - virtual memory teardown of context
1593  *
1594  * @ctx                 : pointer to the habanalabs context structure
1595  *
1596  * This function performs teardown of the following:
1597  * - Virtual block list of available virtual memory
1598  * - Virtual address to area descriptor hashtable
1599  * - MMU for context
1600  *
1601  * In addition this function does the following:
1602  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
1603  *   hashtable should be empty as no valid mappings should exist at this
1604  *   point.
1605  * - Frees any existing physical page list from the idr which relates to the
1606  *   current context asid.
1607  * - This function checks the virtual block list for correctness. At this point
1608  *   the list should contain one element which describes the whole virtual
1609  *   memory range of the context. Otherwise, a warning is printed.
1610  */
1611 void hl_vm_ctx_fini(struct hl_ctx *ctx)
1612 {
1613         struct hl_device *hdev = ctx->hdev;
1614         struct hl_vm *vm = &hdev->vm;
1615         struct hl_vm_phys_pg_pack *phys_pg_list;
1616         struct hl_vm_hash_node *hnode;
1617         struct hlist_node *tmp_node;
1618         int i;
1619
1620         hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1621
1622         if (!hash_empty(ctx->mem_hash))
1623                 dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
1624
1625         hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1626                 dev_dbg(hdev->dev,
1627                         "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1628                         hnode->vaddr, ctx->asid);
1629                 unmap_device_va(ctx, hnode->vaddr);
1630         }
1631
1632         spin_lock(&vm->idr_lock);
1633         idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1634                 if (phys_pg_list->asid == ctx->asid) {
1635                         dev_dbg(hdev->dev,
1636                                 "page list 0x%p of asid %d is still alive\n",
1637                                 phys_pg_list, ctx->asid);
1638                         free_phys_pg_pack(hdev, phys_pg_list);
1639                         idr_remove(&vm->phys_pg_pack_handles, i);
1640                 }
1641         spin_unlock(&vm->idr_lock);
1642
1643         hl_va_range_fini(hdev, &ctx->dram_va_range);
1644         hl_va_range_fini(hdev, &ctx->host_va_range);
1645
1646         mutex_destroy(&ctx->mem_hash_lock);
1647         hl_mmu_ctx_fini(ctx);
1648 }
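
/*
 * Editorial note: the hash walk above deletes entries while iterating --
 * unmap_device_va() is expected to remove and free the node it is handed,
 * which is exactly why the _safe iterator variant is used. Generic shape
 * of the idiom (names hypothetical):
 */
#if 0
        hash_for_each_safe(table, bkt, tmp, entry, node) {
                hash_del(&entry->node); /* safe: 'tmp' was saved beforehand */
                kfree(entry);
        }
#endif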
1649
1650 /*
1651  * hl_vm_init           - initialize virtual memory module
1652  *
1653  * @hdev                : pointer to the habanalabs device structure
1654  *
1655  * This function initializes the following:
1656  * - MMU module
1657  * - DRAM physical pages pool of 2MB
1658  * - Idr for device memory allocation handles
1659  */
1660 int hl_vm_init(struct hl_device *hdev)
1661 {
1662         struct asic_fixed_properties *prop = &hdev->asic_prop;
1663         struct hl_vm *vm = &hdev->vm;
1664         int rc;
1665
1666         rc = hl_mmu_init(hdev);
1667         if (rc) {
1668                 dev_err(hdev->dev, "Failed to init MMU\n");
1669                 return rc;
1670         }
1671
1672         vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1673         if (!vm->dram_pg_pool) {
1674                 dev_err(hdev->dev, "Failed to create dram page pool\n");
1675                 rc = -ENOMEM;
1676                 goto pool_create_err;
1677         }
1678
1679         kref_init(&vm->dram_pg_pool_refcount);
1680
1681         rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1682                         prop->dram_end_address - prop->dram_user_base_address,
1683                         -1);
1684
1685         if (rc) {
1686                 dev_err(hdev->dev,
1687                         "Failed to add memory to dram page pool %d\n", rc);
1688                 goto pool_add_err;
1689         }
1690
1691         spin_lock_init(&vm->idr_lock);
1692         idr_init(&vm->phys_pg_pack_handles);
1693
1694         atomic64_set(&hdev->dram_used_mem, 0);
1695
1696         vm->init_done = true;
1697
1698         return 0;
1699
1700 pool_add_err:
1701         gen_pool_destroy(vm->dram_pg_pool);
1702 pool_create_err:
1703         hl_mmu_fini(hdev);
1704
1705         return rc;
1706 }
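
/*
 * Editorial sketch (standalone, values invented): the gen_pool calls in
 * hl_vm_init() follow the usual genalloc lifecycle -- create a pool with
 * the allocation order, seed it with one physical region, then allocate
 * and free in multiples of the page size until the pool is destroyed.
 */
#if 0
static int example_genpool_lifecycle(void)
{
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(__ffs(SZ_2M), -1); /* 2MB allocation units */
        if (!pool)
                return -ENOMEM;

        /* hypothetical 1GB physical region starting at 512MB */
        if (gen_pool_add(pool, 0x20000000, SZ_1G, -1)) {
                gen_pool_destroy(pool);
                return -ENOMEM;
        }

        addr = gen_pool_alloc(pool, SZ_2M); /* grab one 2MB chunk */
        if (addr)
                gen_pool_free(pool, addr, SZ_2M);

        gen_pool_destroy(pool);
        return 0;
}
#endif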
1707
1708 /*
1709  * hl_vm_fini           - virtual memory module teardown
1710  *
1711  * @hdev                : pointer to the habanalabs device structure
1712  *
1713  * This function performs teardown of the following:
1714  * - Idr for device memory allocation handles
1715  * - DRAM physical pages pool of 2MB
1716  * - MMU module
1717  */
1718 void hl_vm_fini(struct hl_device *hdev)
1719 {
1720         struct hl_vm *vm = &hdev->vm;
1721
1722         if (!vm->init_done)
1723                 return;
1724
1725         /*
1726          * At this point all the contexts should be freed and hence no DRAM
1727          * memory should be in use, so the DRAM pool should be freed here.
1728          */
1729         if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1730                 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1731                                 __func__);
1732
1733         hl_mmu_fini(hdev);
1734
1735         vm->init_done = false;
1736 }
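
/*
 * Editorial note: kref_put() returns 1 only when the count dropped to
 * zero and the release callback actually ran. A different return value
 * here means some user of the pool still holds a reference, i.e. the
 * DRAM pool was not really destroyed -- hence the warning above instead
 * of a forced gen_pool_destroy().
 */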