// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/genalloc.h>
#include <linux/slab.h>

static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
{
        struct pgt_info *pgt_info = NULL;

        hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
                                (unsigned long) addr)
                if (addr == pgt_info->addr)
                        break;

        return pgt_info;
}

static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

        gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
                        ctx->hdev->asic_prop.mmu_hop_table_size);
        hash_del(&pgt_info->node);

        kfree(pgt_info);
}

static u64 alloc_hop(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct pgt_info *pgt_info;
        u64 addr;

        pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
        if (!pgt_info)
                return ULLONG_MAX;

        addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
                        hdev->asic_prop.mmu_hop_table_size);
        if (!addr) {
                dev_err(hdev->dev, "failed to allocate page\n");
                kfree(pgt_info);
                return ULLONG_MAX;
        }

        pgt_info->addr = addr;
        pgt_info->ctx = ctx;
        pgt_info->num_of_ptes = 0;
        hash_add(ctx->mmu_hash, &pgt_info->node, addr);

        return addr;
}

static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
{
        /* clear the last and present bits */
        hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
}

static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the pte was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
        struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
        int num_of_ptes_left;

        pgt_info->num_of_ptes--;

        /*
         * Need to save the number of ptes left because free_hop might free
         * the pgt_info structure.
         */
        num_of_ptes_left = pgt_info->num_of_ptes;
        if (!num_of_ptes_left)
                free_hop(ctx, hop_addr);

        return num_of_ptes_left;
}

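/*
 * A note on the lifecycle implied by the helpers above: each hop table is
 * allocated by alloc_hop(), gains one reference via get_pte() for every PTE
 * written into it, drops one via put_pte() for every PTE cleared, and its
 * backing page returns to the pool through free_hop() once the count reaches
 * zero.
 */
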
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
        return ctx->hdev->asic_prop.mmu_pgt_addr +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
                                        u64 virt_addr, u64 mask, u64 shift)
{
        return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
                        ((virt_addr & mask) >> shift);
}

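/*
 * Worked example (illustrative numbers only; the real HOP*_MASK/SHIFT values
 * come from mmu_general.h): assuming mmu_pte_size == 8, shift == 21 and a
 * 9-bit wide mask, virt_addr == 0x600000 yields index
 * (virt_addr & mask) >> shift == 3, i.e. a PTE address of hop_addr + 24.
 */
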
static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
}

static inline u64 get_next_hop_addr(u64 curr_pte)
{
        if (curr_pte & PAGE_PRESENT_MASK)
                return curr_pte & PHYS_ADDR_MASK;
        else
                return ULLONG_MAX;
}

static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
                                                bool *is_new_hop)
{
        u64 hop_addr = get_next_hop_addr(curr_pte);

        if (hop_addr == ULLONG_MAX) {
                hop_addr = alloc_hop(ctx);
                *is_new_hop = (hop_addr != ULLONG_MAX);
        }

        return hop_addr;
}

/*
 * hl_mmu_init - init the mmu module
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * This function does the following:
 * - Allocate max_asid zeroed hop0 pgts so no mapping is available
 * - Enable mmu in hw
 * - Invalidate the mmu cache
 * - Create a pool of pages for pgts
 * - Returns 0 on success
 *
 * This function depends on DMA QMAN to be working!
 */
int hl_mmu_init(struct hl_device *hdev)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        int rc;

        if (!hdev->mmu_enable)
                return 0;

        /* MMU HW init was already done in device hw_init() */

        mutex_init(&hdev->mmu_cache_lock);

        hdev->mmu_pgt_pool =
                        gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

        if (!hdev->mmu_pgt_pool) {
                dev_err(hdev->dev, "Failed to create page gen pool\n");
                rc = -ENOMEM;
                goto err_pool_create;
        }

        rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
                        prop->mmu_hop0_tables_total_size,
                        prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
                        -1);
        if (rc) {
                dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
                goto err_pool_add;
        }

        return 0;

err_pool_add:
        gen_pool_destroy(hdev->mmu_pgt_pool);
err_pool_create:
        mutex_destroy(&hdev->mmu_cache_lock);

        return rc;
}

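/*
 * Resulting pgt pool layout (sketch): the first mmu_hop0_tables_total_size
 * bytes at mmu_pgt_addr hold the per-ASID hop0 tables that get_hop0_addr()
 * indexes directly, and the gen_pool above hands out hop tables of
 * mmu_hop_table_size bytes from the remainder of the mmu_pgt_size region.
 */
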
/*
 * hl_mmu_fini - release the mmu module.
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * This function does the following:
 * - Disable mmu in hw
 * - Free the pgts pool
 *
 * All ctxs should be freed before calling this func
 */
void hl_mmu_fini(struct hl_device *hdev)
{
        if (!hdev->mmu_enable)
                return;

        gen_pool_destroy(hdev->mmu_pgt_pool);

        mutex_destroy(&hdev->mmu_cache_lock);

        /* MMU HW fini will be done in device hw_fini() */
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow, a hash to hold
 * all page table hops related to this context and an optional DRAM default
 * page mapping.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
                hop3_pte_addr, pte_val;
        int rc, i, j, hop3_allocated = 0;

        if (!hdev->mmu_enable)
                return 0;

        mutex_init(&ctx->mmu_lock);
        hash_init(ctx->mmu_hash);

        if (!hdev->dram_supports_virtual_memory ||
                        !hdev->dram_default_page_mapping)
                return 0;

        num_of_hop3 = prop->dram_size_for_default_page_mapping;
        do_div(num_of_hop3, prop->dram_page_size);
        do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

        /* add hop1 and hop2 */
        total_hops = num_of_hop3 + 2;

        ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
        if (!ctx->dram_default_hops) {
                rc = -ENOMEM;
                goto alloc_err;
        }

        hop1_addr = alloc_hop(ctx);
        if (hop1_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 1\n");
                rc = -ENOMEM;
                goto hop1_err;
        }

        ctx->dram_default_hops[total_hops - 1] = hop1_addr;

        hop2_addr = alloc_hop(ctx);
        if (hop2_addr == ULLONG_MAX) {
                dev_err(hdev->dev, "failed to alloc hop 2\n");
                rc = -ENOMEM;
                goto hop2_err;
        }

        ctx->dram_default_hops[total_hops - 2] = hop2_addr;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                ctx->dram_default_hops[i] = alloc_hop(ctx);
                if (ctx->dram_default_hops[i] == ULLONG_MAX) {
                        dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
                        rc = -ENOMEM;
                        goto hop3_err;
                }
                hop3_allocated++;
        }

        /* need only pte 0 in hops 0 and 1 */
        pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);

        pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
        hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
        get_pte(ctx, hop1_addr);

        hop2_pte_addr = hop2_addr;
        for (i = 0 ; i < num_of_hop3 ; i++) {
                pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
                get_pte(ctx, hop2_addr);
                hop2_pte_addr += HL_PTE_SIZE;
        }

        pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
                        LAST_MASK | PAGE_PRESENT_MASK;

        for (i = 0 ; i < num_of_hop3 ; i++) {
                hop3_pte_addr = ctx->dram_default_hops[i];
                for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
                        hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
                                        pte_val);
                        get_pte(ctx, ctx->dram_default_hops[i]);
                        hop3_pte_addr += HL_PTE_SIZE;
                }
        }

        /* flush all writes to reach PCI */
        mb();
        hdev->asic_funcs->read_pte(hdev, hop2_addr);

        return 0;

hop3_err:
        for (i = 0 ; i < hop3_allocated ; i++)
                free_hop(ctx, ctx->dram_default_hops[i]);
        free_hop(ctx, hop2_addr);
hop2_err:
        free_hop(ctx, hop1_addr);
hop1_err:
        kfree(ctx->dram_default_hops);
alloc_err:
        mutex_destroy(&ctx->mmu_lock);

        return rc;
}

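/*
 * Shape of the default-mapping tree built above (sketch): hop0 PTE 0 points
 * to hop1, hop1 PTE 0 points to hop2, each of the num_of_hop3 hop2 PTEs
 * points to one hop3 table, and every PTE in every hop3 table maps the single
 * DRAM default page at mmu_dram_default_page_addr.
 */
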
/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pgt_info *pgt_info;
        struct hlist_node *tmp;
        u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
                hop3_pte_addr;
        int i, j;

        if (!ctx->hdev->mmu_enable)
                return;

        if (hdev->dram_supports_virtual_memory &&
                        hdev->dram_default_page_mapping) {

                num_of_hop3 = prop->dram_size_for_default_page_mapping;
                do_div(num_of_hop3, prop->dram_page_size);
                do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

                /* add hop1 and hop2 */
                total_hops = num_of_hop3 + 2;
                hop1_addr = ctx->dram_default_hops[total_hops - 1];
                hop2_addr = ctx->dram_default_hops[total_hops - 2];

                for (i = 0 ; i < num_of_hop3 ; i++) {
                        hop3_pte_addr = ctx->dram_default_hops[i];
                        for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
                                clear_pte(hdev, hop3_pte_addr);
                                put_pte(ctx, ctx->dram_default_hops[i]);
                                hop3_pte_addr += HL_PTE_SIZE;
                        }
                }

                hop2_pte_addr = hop2_addr;
                for (i = 0 ; i < num_of_hop3 ; i++) {
                        clear_pte(hdev, hop2_pte_addr);
                        put_pte(ctx, hop2_addr);
                        hop2_pte_addr += HL_PTE_SIZE;
                }

                clear_pte(hdev, hop1_addr);
                put_pte(ctx, hop1_addr);
                clear_pte(hdev, get_hop0_addr(ctx));

                kfree(ctx->dram_default_hops);

                /* flush all writes to reach PCI */
                mb();
                hdev->asic_funcs->read_pte(hdev, hop2_addr);
        }

        if (!hash_empty(ctx->mmu_hash))
                dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");

        hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
                dev_err(hdev->dev,
                        "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
                        pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
                free_hop(ctx, pgt_info->addr);
        }

        mutex_destroy(&ctx->mmu_lock);
}

static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 hop0_addr = 0, hop0_pte_addr = 0,
                hop1_addr = 0, hop1_pte_addr = 0,
                hop2_addr = 0, hop2_pte_addr = 0,
                hop3_addr = 0, hop3_pte_addr = 0,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte;
        int clear_hop3 = 1;
        bool is_dram_addr, is_huge, is_dram_default_page_mapping;

        is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
                                prop->va_space_dram_start_address,
                                prop->va_space_dram_end_address);

        hop0_addr = get_hop0_addr(ctx);
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);

        hop1_addr = get_next_hop_addr(curr_pte);
        if (hop1_addr == ULLONG_MAX)
                goto not_mapped;
        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);

        hop2_addr = get_next_hop_addr(curr_pte);
        if (hop2_addr == ULLONG_MAX)
                goto not_mapped;
        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);

        hop3_addr = get_next_hop_addr(curr_pte);
        if (hop3_addr == ULLONG_MAX)
                goto not_mapped;
        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);

        is_huge = curr_pte & LAST_MASK;

        if (is_dram_addr && !is_huge) {
                dev_err(hdev->dev,
                        "DRAM unmapping should use huge pages only\n");
                return -EFAULT;
        }

        is_dram_default_page_mapping =
                        hdev->dram_default_page_mapping && is_dram_addr;

        if (!is_huge) {
                hop4_addr = get_next_hop_addr(curr_pte);
                if (hop4_addr == ULLONG_MAX)
                        goto not_mapped;
                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
                curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);

                /* hop3 may only be cleared once hop4 is fully released */
                clear_hop3 = 0;
        }

        if (is_dram_default_page_mapping) {
                u64 zero_pte = (prop->mmu_dram_default_page_addr &
                                PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                        PAGE_PRESENT_MASK;
                if (curr_pte == zero_pte) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                if (!(curr_pte & PAGE_PRESENT_MASK)) {
                        dev_err(hdev->dev,
                                "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
                                        virt_addr);
                        goto not_mapped;
                }

                hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
                put_pte(ctx, hop3_addr);
        } else {
                if (!(curr_pte & PAGE_PRESENT_MASK))
                        goto not_mapped;

                clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);

                if (hop4_addr && !put_pte(ctx, hop4_addr))
                        clear_hop3 = 1;

                if (!clear_hop3)
                        goto flush;
                clear_pte(hdev, hop3_pte_addr);

                if (put_pte(ctx, hop3_addr))
                        goto flush;
                clear_pte(hdev, hop2_pte_addr);

                if (put_pte(ctx, hop2_addr))
                        goto flush;
                clear_pte(hdev, hop1_pte_addr);

                if (put_pte(ctx, hop1_addr))
                        goto flush;
                clear_pte(hdev, hop0_pte_addr);
        }

flush:
        /* flush all writes from all cores to reach PCI */
        mb();

        hdev->asic_funcs->read_pte(hdev,
                        hop4_addr ? hop4_pte_addr : hop3_pte_addr);

        return 0;

not_mapped:
        dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                virt_addr);

        return -EINVAL;
}

/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented in a higher level in order to protect the entire unmapping of
 * the memory area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        u64 real_virt_addr;
        u32 real_page_size, npages;
        int i, rc;

        if (!hdev->mmu_enable)
                return 0;

        /*
         * The H/W handles mapping of 4KB/2MB pages. Hence if the host page
         * size is bigger, we break it to sub-pages and unmap them separately.
         */
        if ((page_size % PAGE_SIZE_2MB) == 0) {
                real_page_size = PAGE_SIZE_2MB;
        } else if ((page_size % PAGE_SIZE_4KB) == 0) {
                real_page_size = PAGE_SIZE_4KB;
        } else {
                dev_err(hdev->dev,
                        "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
                                page_size);

                return -EFAULT;
        }

        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;

        for (i = 0 ; i < npages ; i++) {
                rc = _hl_mmu_unmap(ctx, real_virt_addr);
                if (rc)
                        return rc;

                real_virt_addr += real_page_size;
        }

        return 0;
}

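/*
 * Sub-page split example (illustrative): unmapping a 64KB host page gives
 * real_page_size == PAGE_SIZE_4KB and npages == 16. Per the locking note
 * above, a caller (e.g. in memory.c) is expected to wrap the whole area:
 *
 *      mutex_lock(&ctx->mmu_lock);
 *      rc = hl_mmu_unmap(ctx, virt_addr, page_size);
 *      mutex_unlock(&ctx->mmu_lock);
 */
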
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64 hop0_addr = 0, hop0_pte_addr = 0,
                hop1_addr = 0, hop1_pte_addr = 0,
                hop2_addr = 0, hop2_pte_addr = 0,
                hop3_addr = 0, hop3_pte_addr = 0,
                hop4_addr = 0, hop4_pte_addr = 0,
                curr_pte = 0;
        bool hop1_new = false, hop2_new = false, hop3_new = false,
                hop4_new = false, is_huge, is_dram_addr,
                is_dram_default_page_mapping;
        int rc = -ENOMEM;

        /*
         * This mapping function can map a 4KB/2MB page. For a 2MB page there
         * are only 3 hops rather than 4. Currently the DRAM allocation uses
         * 2MB pages only but user memory could have been allocated with one
         * of the two page sizes. Since this is common code for all three
         * cases, we need this huge page check.
         */
        is_huge = page_size == PAGE_SIZE_2MB;

        is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
                                prop->va_space_dram_start_address,
                                prop->va_space_dram_end_address);

        if (is_dram_addr && !is_huge) {
                dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
                return -EFAULT;
        }

        is_dram_default_page_mapping =
                        hdev->dram_default_page_mapping && is_dram_addr;

        hop0_addr = get_hop0_addr(ctx);
        hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);

        hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
        if (hop1_addr == ULLONG_MAX)
                goto err;
        hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);

        hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
        if (hop2_addr == ULLONG_MAX)
                goto err;
        hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);

        hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
        if (hop3_addr == ULLONG_MAX)
                goto err;
        hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
        curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);

        if (!is_huge) {
                hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
                if (hop4_addr == ULLONG_MAX)
                        goto err;
                hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
                curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
        }

        if (is_dram_default_page_mapping) {
                u64 zero_pte = (prop->mmu_dram_default_page_addr &
                                        PTE_PHYS_ADDR_MASK) | LAST_MASK |
                                                PAGE_PRESENT_MASK;

                if (curr_pte != zero_pte) {
                        dev_err(hdev->dev,
                                "DRAM: mapping already exists for virt_addr 0x%llx\n",
                                        virt_addr);
                        rc = -EINVAL;
                        goto err;
                }

                if (hop1_new || hop2_new || hop3_new || hop4_new) {
                        dev_err(hdev->dev,
                                "DRAM mapping should not allocate more hops\n");
                        rc = -EFAULT;
                        goto err;
                }
        } else if (curr_pte & PAGE_PRESENT_MASK) {
                dev_err(hdev->dev,
                        "mapping already exists for virt_addr 0x%llx\n",
                                virt_addr);

                dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
                        hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
                        hop0_pte_addr);
                dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
                        hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
                        hop1_pte_addr);
                dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
                        hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
                        hop2_pte_addr);
                dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
                        hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
                        hop3_pte_addr);

                if (!is_huge)
                        dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
                                hdev->asic_funcs->read_pte(hdev,
                                                        hop4_pte_addr),
                                                        hop4_pte_addr);

                rc = -EINVAL;
                goto err;
        }

        curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
                        | PAGE_PRESENT_MASK;

        hdev->asic_funcs->write_pte(hdev,
                        is_huge ? hop3_pte_addr : hop4_pte_addr,
                        curr_pte);

        if (hop1_new) {
                curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
                                curr_pte);
        }
        if (hop2_new) {
                curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
                                curr_pte);
                get_pte(ctx, hop1_addr);
        }
        if (hop3_new) {
                curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
                                PAGE_PRESENT_MASK;
                ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
                                curr_pte);
                get_pte(ctx, hop2_addr);
        }

        if (!is_huge) {
                if (hop4_new) {
                        curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
                                        PAGE_PRESENT_MASK;
                        ctx->hdev->asic_funcs->write_pte(ctx->hdev,
                                        hop3_pte_addr, curr_pte);
                        get_pte(ctx, hop3_addr);
                }

                get_pte(ctx, hop4_addr);
        } else {
                get_pte(ctx, hop3_addr);
        }

        /* flush all writes from all cores to reach PCI */
        mb();

        hdev->asic_funcs->read_pte(hdev,
                        is_huge ? hop3_pte_addr : hop4_pte_addr);

        return 0;

err:
        if (hop4_new)
                free_hop(ctx, hop4_addr);
        if (hop3_new)
                free_hop(ctx, hop3_addr);
        if (hop2_new)
                free_hop(ctx, hop2_addr);
        if (hop1_new)
                free_hop(ctx, hop1_addr);

        return rc;
}

/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys addr
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
        struct hl_device *hdev = ctx->hdev;
        u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;

        if (!hdev->mmu_enable)
                return 0;

        /*
         * The H/W handles mapping of 4KB/2MB pages. Hence if the host page
         * size is bigger, we break it to sub-pages and map them separately.
         */
        if ((page_size % PAGE_SIZE_2MB) == 0) {
                real_page_size = PAGE_SIZE_2MB;
        } else if ((page_size % PAGE_SIZE_4KB) == 0) {
                real_page_size = PAGE_SIZE_4KB;
        } else {
                dev_err(hdev->dev,
                        "page size of %u is not 4KB nor 2MB aligned, can't map\n",
                                page_size);

                return -EFAULT;
        }

        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
        real_phys_addr = phys_addr;

        for (i = 0 ; i < npages ; i++) {
                rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;

                real_virt_addr += real_page_size;
                real_phys_addr += real_page_size;
                mapped_cnt++;
        }

        return 0;

err:
        real_virt_addr = virt_addr;
        for (i = 0 ; i < mapped_cnt ; i++) {
                if (_hl_mmu_unmap(ctx, real_virt_addr))
                        dev_warn_ratelimited(hdev->dev,
                                "failed to unmap va: 0x%llx\n", real_virt_addr);

                real_virt_addr += real_page_size;
        }

        return rc;
}

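/*
 * Usage sketch, following the same locking rule (rollback of partially
 * mapped sub-pages is handled internally by the err path above):
 *
 *      mutex_lock(&ctx->mmu_lock);
 *      rc = hl_mmu_map(ctx, virt_addr, phys_addr, page_size);
 *      mutex_unlock(&ctx->mmu_lock);
 */
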
/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{

}