/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define NVKM_VMM_LEVELS_MAX 5

#include <subdev/fb.h>
nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
        struct nvkm_vmm_pt *pgt = *ppgt;

static struct nvkm_vmm_pt *
nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
                const struct nvkm_vmm_page *page)
        const u32 pten = 1 << desc->bits;
        struct nvkm_vmm_pt *pgt;

        if (desc->type > PGT) {
                if (desc->type == SPT) {
                        const struct nvkm_vmm_desc *pair = page[-1].desc;
                        lpte = pten >> (desc->bits - pair->bits);
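                        /* Illustrative numbers (assumed, not from a real
                         * chip): with desc->bits = 8 (256 SPTEs) and
                         * pair->bits = 3, lpte = 256 >> 5 = 8, i.e. one
                         * byte of SPTE refcount state is allocated per
                         * LPTE that overlaps this SPT.
                         */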
        if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
        pgt->page = page ? page->shift : 0;

        if (desc->type == PGD) {
                pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);

struct nvkm_vmm_iter {
        const struct nvkm_vmm_page *page;
        const struct nvkm_vmm_desc *desc;
        u32 pte[NVKM_VMM_LEVELS_MAX];
        struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
#ifdef CONFIG_NOUVEAU_DEBUG_MMU
nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
        case PGD: return "PGD";
        case PGT: return "PGT";
        case SPT: return "SPT";
        case LPT: return "LPT";

nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
        for (lvl = it->max; lvl >= 0; lvl--) {
                buf += sprintf(buf, "%05x:", it->pte[lvl]);
                buf += sprintf(buf, "xxxxx:");

#define TRA(i,f,a...) do {                                                     \
        char _buf[NVKM_VMM_LEVELS_MAX * 7];                                    \
        struct nvkm_vmm_iter *_it = (i);                                       \
        nvkm_vmm_trace(_it, _buf);                                             \
        VMM_TRACE(_it->vmm, "%s "f, _buf, ##a);                                \
} while(0)
#else
#define TRA(i,f,a...)
#endif
nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
        it->flush = min(it->flush, it->max - it->lvl);

nvkm_vmm_flush(struct nvkm_vmm_iter *it)
        if (it->flush != NVKM_VMM_LEVELS_MAX) {
                if (it->vmm->func->flush) {
                        TRA(it, "flush: %d", it->flush);
                        it->vmm->func->flush(it->vmm, it->flush);
                it->flush = NVKM_VMM_LEVELS_MAX;
nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc[it->lvl].type == SPT;
        struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
        struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
        struct nvkm_mmu_pt *pt = pgt->pt[type];
        struct nvkm_vmm *vmm = it->vmm;
        u32 pdei = it->pte[it->lvl + 1];

        /* Recurse up the tree, unreferencing/destroying unneeded PDs. */
        if (--pgd->refs[0]) {
                const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
                /* PD has other valid PDEs, so we need a proper update. */
                TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
                pgt->pt[type] = NULL;
                if (!pgt->refs[!type]) {
                        /* PDE no longer required. */
                        func->sparse(vmm, pgd->pt[0], pdei, 1);
                        pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;

                        func->unmap(vmm, pgd->pt[0], pdei, 1);
                        pgd->pde[pdei] = NULL;

                        /* Special handling for Tesla-class GPUs,
                         * where there's no central PD, but each
                         * instance has its own embedded PD.
                         */
                        func->pde(vmm, pgd, pdei);
                        pgd->pde[pdei] = NULL;

                /* PDE was pointing at dual-PTs and we're removing
                 * one of them, leaving the other in place.
                 */
                func->pde(vmm, pgd, pdei);

                /* GPU may have cached the PTs, flush before freeing. */
                nvkm_vmm_flush_mark(it);

        /* PD has no valid PDEs left, so we can just destroy it. */
        nvkm_vmm_unref_pdes(it);

        TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
        nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
        if (!pgt->refs[!type])
                nvkm_vmm_pt_del(&pgt);
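
/* Note on the recursion above: freeing the last PTE of a leaf PT drops a
 * reference on its parent PD via nvkm_vmm_unref_pdes(), which may in turn
 * empty that PD and recurse another level up, so a single unmap can tear
 * down an entire branch of the page tree in one pass.
 */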
nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
                     const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
        const struct nvkm_vmm_desc *pair = it->page[-1].desc;
        const u32 sptb = desc->bits - pair->bits;
        const u32 sptn = 1 << sptb;
        struct nvkm_vmm *vmm = it->vmm;
        u32 spti = ptei & (sptn - 1), lpti, pteb;

        /* Determine how many SPTEs are being touched under each LPTE,
         * and drop reference counts.
         */
        for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
                const u32 pten = min(sptn - spti, ptes);
                pgt->pte[lpti] -= pten;

        /* We're done here if there's no corresponding LPT. */

        for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
                /* Skip over any LPTEs that still have valid SPTEs. */
                if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
                        for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
                                if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))

                /* As there are no more non-UNMAPPED SPTEs left in the range
                 * covered by a number of LPTEs, the LPTEs once again take
                 * control over their address range.
                 *
                 * Determine how many LPTEs need to transition state.
                 */
                pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
                for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
                        if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
                        pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;

                if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
                        TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
                        pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
                } else
                if (pair->func->invalid) {
                        /* If the MMU supports it, restore the LPTE to the
                         * INVALID state to tell the MMU there is no point
                         * trying to fetch the corresponding SPTEs.
                         */
                        TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
                        pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
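
/* Worked example (hypothetical geometry): with sptb = 5, each LPTE covers
 * sptn = 32 SPTEs.  Unreferencing ptes = 48 SPTEs starting at ptei = 16
 * touches two LPTEs: lpti 0 loses min(32 - 16, 48) = 16 references and
 * lpti 1 loses the remaining 32.  Any LPTE whose SPTE count reaches zero
 * then transitions back to the SPARSE or INVALID state as above.
 */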
nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
        struct nvkm_vmm_pt *pgt = it->pt[0];

        /* Need to clear PTE valid bits before we dma_unmap_page(). */
        dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);

        /* GPU may have cached the PT, flush before unmap. */
        nvkm_vmm_flush_mark(it);
        desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);

        /* Drop PTE references. */
        pgt->refs[type] -= ptes;

        /* Dual-PTs need special handling, unless the PDE is becoming invalid. */
        if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
                nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);

        /* PT no longer needed?  Destroy it. */
        if (!pgt->refs[type]) {
                TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
                nvkm_vmm_unref_pdes(it);
                return false; /* PTE writes for unmap() not necessary. */
nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
                   const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
        const struct nvkm_vmm_desc *pair = it->page[-1].desc;
        const u32 sptb = desc->bits - pair->bits;
        const u32 sptn = 1 << sptb;
        struct nvkm_vmm *vmm = it->vmm;
        u32 spti = ptei & (sptn - 1), lpti, pteb;

        /* Determine how many SPTEs are being touched under each LPTE,
         * and increase reference counts.
         */
        for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
                const u32 pten = min(sptn - spti, ptes);
                pgt->pte[lpti] += pten;

        /* We're done here if there's no corresponding LPT. */

        for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
                /* Skip over any LPTEs that already have valid SPTEs. */
                if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
                        for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
                                if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))

                /* As there are now non-UNMAPPED SPTEs in the range covered
                 * by a number of LPTEs, we need to transfer control of the
                 * address range to the SPTEs.
                 *
                 * Determine how many LPTEs need to transition state.
                 */
                pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
                for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
                        if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
                        pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;

                if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
                        const u32 spti = pteb * sptn;
                        const u32 sptc = ptes * sptn;
                        /* The entire LPTE is marked as sparse, so we need
                         * to make sure that the SPTEs are too.
                         */
                        TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
                        desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
                        /* Sparse LPTEs prevent SPTEs from being accessed. */
                        TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
                        pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
                } else
                if (pair->func->invalid) {
                        /* MMU supports blocking SPTEs by marking an LPTE
                         * as INVALID.  We need to reverse that here.
                         */
                        TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
                        pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
        struct nvkm_vmm_pt *pgt = it->pt[0];

        /* Take PTE references. */
        pgt->refs[type] += ptes;

        /* Dual-PTs need special handling. */
        if (desc->type == SPT)
                nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);

nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
                     struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
        if (desc->type == PGD) {
                pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
        } else
        if (desc->type == LPT) {
                memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);

nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
        struct nvkm_vmm_pt *pt = it->pt[0];
        if (it->desc->type == PGD)
                memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
        else
        if (it->desc->type == LPT)
                memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
        return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);

nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
        nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
        return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
        const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
        const int type = desc->type == SPT;
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
        const bool zero = !pgt->sparse && !desc->func->invalid;
        struct nvkm_vmm *vmm = it->vmm;
        struct nvkm_mmu *mmu = vmm->mmu;
        struct nvkm_mmu_pt *pt;
        u32 pten = 1 << desc->bits;
        u32 pteb, ptei, ptes;
        u32 size = desc->size * pten;

        pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
        if (!pgt->pt[type]) {
                nvkm_vmm_unref_pdes(it);

        if (desc->type == LPT && pgt->refs[1]) {
                /* SPT already exists covering the same range as this LPT,
                 * which means we need to be careful that any LPTEs which
                 * overlap valid SPTEs are unmapped as opposed to invalid
                 * or sparse, which would prevent the MMU from looking at
                 * the SPTEs on some GPUs.
                 */
                for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
                        bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
                        for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
                                bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;

                        desc->func->sparse(vmm, pt, pteb, ptes);

                        desc->func->invalid(vmm, pt, pteb, ptes);
                        memset(&pgt->pte[pteb], 0x00, ptes);

                        desc->func->unmap(vmm, pt, pteb, ptes);

                        pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;

        nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
        desc->func->sparse(vmm, pt, 0, pten);

        desc->func->invalid(vmm, pt, 0, pten);

        TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
        it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
        nvkm_vmm_flush_mark(it);
nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
        const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
        struct nvkm_vmm_pt *pgt = pgd->pde[pdei];

        pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
                nvkm_vmm_unref_pdes(it);

        pgd->pde[pdei] = pgt;
nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
              u64 addr, u64 size, const char *name, bool ref, bool pfn,
              bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
              nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
              nvkm_vmm_pxe_func CLR_PTES)
        const struct nvkm_vmm_desc *desc = page->desc;
        struct nvkm_vmm_iter it;
        u64 bits = addr >> page->shift;

        it.cnt = size >> page->shift;
        it.flush = NVKM_VMM_LEVELS_MAX;

        /* Deconstruct address into PTE indices for each mapping level. */
        for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
                it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
                bits >>= desc[it.lvl].bits;
        }
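        /* For illustration (level layout assumed, not taken from a real
         * chip): with desc bits {8, 8, 8, 8, 2} above a 12-bit page shift,
         * an address of 0x12345678000 gives bits = 0x12345678, and the
         * loop yields pte[0] = 0x78, pte[1] = 0x56, pte[2] = 0x34,
         * pte[3] = 0x12 and pte[4] = 0, i.e. the PTE/PDE index at each
         * level from leaf to root.
         */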
        it.pt[it.max] = vmm->pd;

        TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
            addr, size, page->shift, it.cnt);

        /* Depth-first traversal of page tables. */
        while (it.cnt) {
                struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
                const int type = desc->type == SPT;
                const u32 pten = 1 << desc->bits;
                const u32 ptei = it.pte[0];
                const u32 ptes = min_t(u64, it.cnt, pten - ptei);

                /* Walk down the tree, finding page tables for each level. */
                for (; it.lvl; it.lvl--) {
                        const u32 pdei = it.pte[it.lvl];
                        struct nvkm_vmm_pt *pgd = pgt;

                        if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
                                if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))

                        it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];

                        /* This is a separate step from above due to GF100 and
                         * newer having dual page tables at some levels, which
                         * are refcounted independently.
                         */
                        if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
                                if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))

                /* Handle PTE updates. */
                if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
                        struct nvkm_mmu_pt *pt = pgt->pt[type];
                        if (MAP_PTES || CLR_PTES) {
                                MAP_PTES(vmm, pt, ptei, ptes, map);
                                CLR_PTES(vmm, pt, ptei, ptes);
                                nvkm_vmm_flush_mark(&it);

                /* Walk back up the tree to the next position. */
                it.pte[it.lvl] += ptes;
                while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
                        it.pte[it.lvl++] = 0;

        /* Reconstruct the failure address so the caller is able to
         * reverse any partially completed operations.
         */
        addr = it.pte[it.max--];
        do {
                addr = addr << desc[it.max].bits;
                addr |= it.pte[it.max];
        } while (it.max--);

        return addr << page->shift;
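
/* A self-contained sketch (not part of the driver) of how the failure
 * address computed above inverts the deconstruction loop at the top of
 * nvkm_vmm_iter(); the names and the {8, 8, 8, 8, 2} level layout are
 * purely illustrative.
 */
#if 0
static u64
vmm_iter_fail_addr_example(const u8 *lvl_bits, const u32 *pte, int max, u8 shift)
{
        u64 addr = pte[max--];
        do {
                /* Fold each remaining level's index back into the address. */
                addr = (addr << lvl_bits[max]) | pte[max];
        } while (max--);
        return addr << shift; /* byte address of the first unprocessed PTE */
}
#endif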
nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                         u64 addr, u64 size)
        nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
                      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
                      page->desc->func->invalid ?
                      page->desc->func->invalid : page->desc->func->unmap);

nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                         u64 addr, u64 size)
        if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
                u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
                                         true, false, nvkm_vmm_sparse_ref_ptes,
                                         NULL, NULL, page->desc->func->sparse);
                if ((size = fail - addr))
                        nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
        const struct nvkm_vmm_page *page = vmm->func->page;

                /* Limit maximum page size based on remaining size. */
                while (size < (1ULL << page[m].shift))
                        m++;

                /* Find largest page size suitable for alignment. */
                while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
                        i++;

                /* Determine number of PTEs at this page size. */
                if (i != m) {
                        /* Limited to alignment boundary of next page size. */
                        u64 next = 1ULL << page[i - 1].shift;
                        u64 part = ALIGN(addr, next) - addr;
                        if (size - part >= next)
                                block = (part >> page[i].shift) << page[i].shift;
                        else
                                block = (size >> page[i].shift) << page[i].shift;
                } else {
                        block = (size >> page[i].shift) << page[i].shift;
                }

                /* Perform operation. */
                if (ref) {
                        int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
                        if (ret) {
                                if ((size = addr - start))
                                        nvkm_vmm_ptes_sparse(vmm, start, size, false);
                        }
                } else {
                        nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
                }
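
/* Worked example (page shifts {21, 16, 12} assumed for illustration):
 * sparsing addr = 0x1ff000, size = 0x402000 is split greedily by
 * alignment into a single 4KiB PTE at 0x1ff000, two 2MiB PTEs at
 * 0x200000, and a final 4KiB PTE at 0x600000.
 */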
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                        u64 addr, u64 size, bool sparse, bool pfn)
        const struct nvkm_vmm_desc_func *func = page->desc->func;
        nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
                      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
                      sparse ? func->sparse : func->invalid ? func->invalid :
                               func->unmap);

nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                      u64 addr, u64 size, struct nvkm_vmm_map *map,
                      nvkm_vmm_pte_func func)
        u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
                                 false, nvkm_vmm_ref_ptes, func, map, NULL);
        if ((size = fail - addr))
                nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);

nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                    u64 addr, u64 size, bool sparse, bool pfn)
        const struct nvkm_vmm_desc_func *func = page->desc->func;
        nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
                      sparse ? func->sparse : func->invalid ? func->invalid :
                               func->unmap);

nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size, struct nvkm_vmm_map *map,
                  nvkm_vmm_pte_func func)
        nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
                      NULL, func, map, NULL);

nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size)
        nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
                      nvkm_vmm_unref_ptes, NULL, NULL, NULL);

nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size)
        u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
                                 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
                nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
static inline struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
        struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                vma->page = NVKM_VMA_PAGE_NONE;
                vma->refd = NVKM_VMA_PAGE_NONE;

nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        struct nvkm_vma *new;

        BUG_ON(vma->size == tail);

        if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))

        new->mapref = vma->mapref;
        new->sparse = vma->sparse;
        new->page = vma->page;
        new->refd = vma->refd;
        new->used = vma->used;
        new->part = vma->part;
        new->user = vma->user;
        new->busy = vma->busy;
        new->mapped = vma->mapped;
        list_add(&new->head, &vma->head);
nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        rb_erase(&vma->tree, &vmm->free);

nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        nvkm_vmm_free_remove(vmm, vma);
        list_del(&vma->head);

nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        struct rb_node **ptr = &vmm->free.rb_node;
        struct rb_node *parent = NULL;

        while (*ptr) {
                struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
                parent = *ptr;
                if (vma->size < this->size)
                        ptr = &parent->rb_left;
                else
                if (vma->size > this->size)
                        ptr = &parent->rb_right;
                else
                if (vma->addr < this->addr)
                        ptr = &parent->rb_left;
                else
                if (vma->addr > this->addr)
                        ptr = &parent->rb_right;
        }

        rb_link_node(&vma->tree, parent, ptr);
        rb_insert_color(&vma->tree, &vmm->free);
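
/* The free tree is keyed by (size, addr), which is what lets
 * nvkm_vmm_get_locked() below descend to the smallest free block that can
 * possibly satisfy an allocation, then scan rb_next() siblings when
 * alignment constraints reject a candidate.
 */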
nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        rb_erase(&vma->tree, &vmm->root);

nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        nvkm_vmm_node_remove(vmm, vma);
        list_del(&vma->head);

nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        struct rb_node **ptr = &vmm->root.rb_node;
        struct rb_node *parent = NULL;

        while (*ptr) {
                struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
                parent = *ptr;
                if (vma->addr < this->addr)
                        ptr = &parent->rb_left;
                else
                if (vma->addr > this->addr)
                        ptr = &parent->rb_right;
        }

        rb_link_node(&vma->tree, parent, ptr);
        rb_insert_color(&vma->tree, &vmm->root);

nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
        struct rb_node *node = vmm->root.rb_node;
        while (node) {
                struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
                if (addr < vma->addr)
                        node = node->rb_left;
                else
                if (addr >= vma->addr + vma->size)
                        node = node->rb_right;
        }

#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :             \
        list_entry((root)->head.dir, struct nvkm_vma, head))
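
/* Usage note: node(vma, next)/node(vma, prev) return the VMA adjacent to
 * 'vma' in address order via the vmm->list linkage, or NULL once the list
 * head is reached; the macro relies on a 'vmm' variable being in scope at
 * the call site.
 */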
static struct nvkm_vma *
nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
                    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
        if (vma->size == size) {
                vma->size += next->size;
                nvkm_vmm_node_delete(vmm, next);

                prev->size += vma->size;
                nvkm_vmm_node_delete(vmm, vma);

        nvkm_vmm_node_remove(vmm, next);

        nvkm_vmm_node_insert(vmm, next);

        if (vma->size != size) {
                nvkm_vmm_node_remove(vmm, vma);

                nvkm_vmm_node_insert(vmm, vma);

        prev->size += vma->size;
        nvkm_vmm_node_delete(vmm, vma);

nvkm_vmm_node_split(struct nvkm_vmm *vmm,
                    struct nvkm_vma *vma, u64 addr, u64 size)
        struct nvkm_vma *prev = NULL;

        if (vma->addr != addr) {
                if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))

                nvkm_vmm_node_insert(vmm, vma);

        if (vma->size != size) {
                struct nvkm_vma *tmp;
                if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
                        nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);

                nvkm_vmm_node_insert(vmm, tmp);
nvkm_vma_dump(struct nvkm_vma *vma)
        printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
               vma->addr, (u64)vma->size,
               vma->used ? '-' : 'F',
               vma->mapref ? 'R' : '-',
               vma->sparse ? 'S' : '-',
               vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
               vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
               vma->part ? 'P' : '-',
               vma->user ? 'U' : '-',
               vma->busy ? 'B' : '-',
               vma->mapped ? 'M' : '-',
               vma->memory);

nvkm_vmm_dump(struct nvkm_vmm *vmm)
        struct nvkm_vma *vma;
        list_for_each_entry(vma, &vmm->list, head) {
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
        struct nvkm_vma *vma;
        struct rb_node *node;

        while ((node = rb_first(&vmm->root))) {
                struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
                nvkm_vmm_put(vmm, &vma);

        if (vmm->bootstrapped) {
                const struct nvkm_vmm_page *page = vmm->func->page;
                const u64 limit = vmm->limit - vmm->start;

                while (page[1].shift)
                        page++;

                nvkm_mmu_ptc_dump(vmm->mmu);
                nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);

        vma = list_first_entry(&vmm->list, typeof(*vma), head);
        list_del(&vma->head);

        WARN_ON(!list_empty(&vmm->list));

        dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
                          vmm->nullp, vmm->null);

        nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
        nvkm_vmm_pt_del(&vmm->pd);
nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
        struct nvkm_vma *vma;
        if (!(vma = nvkm_vma_new(addr, size)))
        vma->sparse = false;
        nvkm_vmm_node_insert(vmm, vma);
        list_add_tail(&vma->head, &vmm->list);

nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
              u32 pd_header, bool managed, u64 addr, u64 size,
              struct lock_class_key *key, const char *name,
              struct nvkm_vmm *vmm)
        static struct lock_class_key _key;
        const struct nvkm_vmm_page *page = func->page;
        const struct nvkm_vmm_desc *desc;
        struct nvkm_vma *vma;
        int levels, bits = 0, ret;

        vmm->debug = mmu->subdev.debug;
        kref_init(&vmm->kref);

        __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);

        /* Locate the smallest page size supported by the backend, it will
         * have the deepest nesting of page tables.
         */
        while (page[1].shift)
                page++;

        /* Locate the structure that describes the layout of the top-level
         * page table, and determine the number of valid bits in a virtual
         * address.
         */
        for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
                bits += desc->bits;
        bits += page->shift;

        if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))

        /* Allocate top-level page table. */
        vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);

        vmm->pd->refs[0] = 1;
        INIT_LIST_HEAD(&vmm->join);

        /* ... and the GPU storage for it, except on Tesla-class GPUs that
         * have the PD embedded in the instance structure.
         */
                const u32 size = pd_header + desc->size * (1 << desc->bits);
                vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
                if (!vmm->pd->pt[0])

        /* Initialise address-space MM. */
        INIT_LIST_HEAD(&vmm->list);
        vmm->free = RB_ROOT;
        vmm->root = RB_ROOT;

        if (managed) {
                /* Address-space will be managed by the client for the most
                 * part, except for a specified area where NVKM allocations
                 * are allowed to be placed.
                 */
                vmm->limit = 1ULL << bits;
                if (addr + size < addr || addr + size > vmm->limit)

                /* Client-managed area before the NVKM-managed area. */
                if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))

                /* NVKM-managed area. */
                if (!(vma = nvkm_vma_new(addr, size)))
                nvkm_vmm_free_insert(vmm, vma);
                list_add_tail(&vma->head, &vmm->list);

                /* Client-managed area after the NVKM-managed area. */
                size = vmm->limit - addr;
                if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
        } else {
                /* Address-space fully managed by NVKM, requiring calls to
                 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
                 */
                vmm->limit = size ? (addr + size) : (1ULL << bits);
                if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))

                if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))

                nvkm_vmm_free_insert(vmm, vma);
                list_add(&vma->head, &vmm->list);

nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
              u32 hdr, bool managed, u64 addr, u64 size,
              struct lock_class_key *key, const char *name,
              struct nvkm_vmm **pvmm)
        if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
        return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
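
/* Resulting layout for the managed case (sketch):
 *
 *   0 ........... addr ............. addr + size ........ 1 << bits
 *   | client-managed | NVKM-managed  | client-managed     |
 *
 * The two client-managed pieces come from nvkm_vmm_ctor_managed(), and
 * only the middle region is entered into the free tree for allocation.
 */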
static struct nvkm_vma *
nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                         u64 addr, u64 size, u8 page, bool map)
        struct nvkm_vma *prev = NULL;
        struct nvkm_vma *next = NULL;

        if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
                if (prev->memory || prev->mapped != map)

        if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
                    next->memory || next->mapped != map)

                return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
        return nvkm_vmm_node_split(vmm, vma, addr, size);

nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
        struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
        struct nvkm_vma *next;
        u64 limit = addr + size;

                if (!vma->mapped || vma->memory)

                size = min(limit - start, vma->size - (start - vma->addr));

                nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
                                        start, size, false, true);

                next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
                if (!WARN_ON(!next)) {
                        vma->refd = NVKM_VMA_PAGE_NONE;
                        vma->mapped = false;
        } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
/* TODO:
 * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
 *   with inside HMM, which would be a lot nicer for us to deal with.
 * - Multiple page sizes (particularly for huge page support).
 * - Support for systems without a 4KiB page size.
 */
nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
        const struct nvkm_vmm_page *page = vmm->func->page;
        struct nvkm_vma *vma, *tmp;
        u64 limit = addr + size;
        int pm = size >> shift;

        /* Only support mapping where the page size of the incoming page
         * array matches a page size available for direct mapping.
         */
        while (page->shift && page->shift != shift &&
               page->desc->func->pfn == NULL)
                page++;

        if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
            !IS_ALIGNED(size, 1ULL << shift) ||
            addr + size < addr || addr + size > vmm->limit) {
                VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
                          shift, page->shift, addr, size);

        if (!(vma = nvkm_vmm_node_search(vmm, addr)))

        do {
                bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
                bool mapped = vma->mapped;
                u64 size = limit - start;

                /* Narrow the operation window to cover a single action (page
                 * should be mapped or not) within a single VMA.
                 */
                for (pn = 0; pi + pn < pm; pn++) {
                        if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))

                size = min_t(u64, size, pn << page->shift);
                size = min_t(u64, size, vma->size + vma->addr - addr);

                /* Reject any operation to unmanaged regions, and areas that
                 * have nvkm_memory objects mapped in them already.
                 */
                if (!vma->mapref || vma->memory) {

                /* In order to both properly refcount GPU page tables, and
                 * prevent "normal" mappings and these direct mappings from
                 * interfering with each other, we need to track contiguous
                 * ranges that have been mapped with this interface.
                 *
                 * Here we attempt to either split an existing VMA so we're
                 * able to flag the region as either unmapped/mapped, or to
                 * merge with adjacent VMAs that are already compatible.
                 *
                 * If the region is already compatible, nothing is required.
                 */
                if (map != mapped) {
                        tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
                                                       vmm->func->page, map);
                        if (WARN_ON(!tmp)) {

                        if ((tmp->mapped = map))
                                tmp->refd = page - vmm->func->page;
                        else
                                tmp->refd = NVKM_VMA_PAGE_NONE;

                /* Update HW page tables. */
                        struct nvkm_vmm_map args;
                        args.pfn = &pfn[pi];

                        ret = nvkm_vmm_ptes_get_map(vmm, page, addr,

                        nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
                                          page->desc->func->pfn);

                        nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,

                /* Iterate to next operation. */
                if (vma->addr + vma->size == addr + size)
                        vma = node(vma, next);

                        /* Failure is signalled by clearing the valid bit on
                         * any PFN that couldn't be modified as requested.
                         */
                        pfn[pi++] = NVKM_VMM_PFN_NONE;
                        size -= 1 << page->shift;

                pi += size >> page->shift;
        } while (vma && start < limit);
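
/* The pfn[] array carries one entry per small page; NVKM_VMM_PFN_V marks
 * an entry that should be mapped, and on error the loop above writes
 * NVKM_VMM_PFN_NONE back, so the caller can see exactly which pages were
 * not modified as requested.
 */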
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        struct nvkm_vma *prev = NULL;
        struct nvkm_vma *next;

        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
        vma->mapped = false;

        if (vma->part && (prev = node(vma, prev)) && prev->mapped)
        if ((next = node(vma, next)) && (!next->part || next->mapped))
        nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);

nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
        const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];

                nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
                vma->refd = NVKM_VMA_PAGE_NONE;

                nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);

        nvkm_vmm_unmap_region(vmm, vma);

nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
                mutex_lock(&vmm->mutex);
                nvkm_vmm_unmap_locked(vmm, vma, false);
                mutex_unlock(&vmm->mutex);
nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                   void *argv, u32 argc, struct nvkm_vmm_map *map)
        switch (nvkm_memory_target(map->memory)) {
        case NVKM_MEM_TARGET_VRAM:
                if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
                        VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);

        case NVKM_MEM_TARGET_HOST:
        case NVKM_MEM_TARGET_NCOH:
                if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
                        VMM_DEBUG(vmm, "%d !HOST", map->page->shift);

        if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
            !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
            !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
            nvkm_memory_page(map->memory) < map->page->shift) {
                VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
                          vma->addr, (u64)vma->size, map->offset, map->page->shift,
                          nvkm_memory_page(map->memory));

        return vmm->func->valid(vmm, argv, argc, map);

nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                    void *argv, u32 argc, struct nvkm_vmm_map *map)
        for (map->page = vmm->func->page; map->page->shift; map->page++) {
                VMM_DEBUG(vmm, "trying %d", map->page->shift);
                if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))

nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                    void *argv, u32 argc, struct nvkm_vmm_map *map)
        nvkm_vmm_pte_func func;

        /* Make sure we won't overrun the end of the memory object. */
        if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
                VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
                          nvkm_memory_size(map->memory),
                          map->offset, (u64)vma->size);

        /* Check remaining arguments for validity. */
        if (vma->page == NVKM_VMA_PAGE_NONE &&
            vma->refd == NVKM_VMA_PAGE_NONE) {
                /* Find the largest page size we can perform the mapping at. */
                const u32 debug = vmm->debug;

                ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);

                        VMM_DEBUG(vmm, "invalid at any page size");
                        nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
        } else {
                /* Page size of the VMA is already pre-determined. */
                if (vma->refd != NVKM_VMA_PAGE_NONE)
                        map->page = &vmm->func->page[vma->refd];
                else
                        map->page = &vmm->func->page[vma->page];

                ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
                        VMM_DEBUG(vmm, "invalid %d\n", ret);

        /* Deal with the 'offset' argument, and fetch the backend function. */
        map->off = map->offset;
        if (map->mem) {
                for (; map->off; map->mem = map->mem->next) {
                        u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
                        if (size > map->off)
                func = map->page->desc->func->mem;
        } else
        if (map->sgl) {
                for (; map->off; map->sgl = sg_next(map->sgl)) {
                        u64 size = sg_dma_len(map->sgl);
                        if (size > map->off)
                func = map->page->desc->func->sgl;
        } else {
                map->dma += map->offset >> PAGE_SHIFT;
                map->off = map->offset & PAGE_MASK;
                func = map->page->desc->func->dma;
        }

        /* Perform the map. */
        if (vma->refd == NVKM_VMA_PAGE_NONE) {
                ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);

                vma->refd = map->page - vmm->func->page;
        } else {
                nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
        }

        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
        vma->memory = nvkm_memory_ref(map->memory);
        vma->tags = map->tags;

nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
             struct nvkm_vmm_map *map)
        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);

        mutex_unlock(&vmm->mutex);
nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        struct nvkm_vma *prev, *next;

        if ((prev = node(vma, prev)) && !prev->used) {
                vma->addr = prev->addr;
                vma->size += prev->size;
                nvkm_vmm_free_delete(vmm, prev);

        if ((next = node(vma, next)) && !next->used) {
                vma->size += next->size;
                nvkm_vmm_free_delete(vmm, next);

        nvkm_vmm_free_insert(vmm, vma);

nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        const struct nvkm_vmm_page *page = vmm->func->page;
        struct nvkm_vma *next = vma;

        if (vma->mapref || !vma->sparse) {
                do {
                        const bool mem = next->memory != NULL;
                        const bool map = next->mapped;
                        const u8  refd = next->refd;
                        const u64 addr = next->addr;
                        u64 size = next->size;

                        /* Merge regions that are in the same state. */
                        while ((next = node(next, next)) && next->part &&
                               (next->mapped == map) &&
                               (next->memory != NULL) == mem &&
                               (next->refd == refd))

                        if (map) {
                                /* Region(s) are mapped, merge the unmap
                                 * and dereference into a single walk of
                                 * the page tree.
                                 */
                                nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
                        } else
                        if (refd != NVKM_VMA_PAGE_NONE) {
                                /* Drop allocation-time PTE references. */
                                nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
                        }
                } while (next && next->part);

        /* Merge any mapped regions that were split from the initial
         * address-space allocation back into the allocated VMA, and
         * release memory/compression resources.
         */
        do {
                        nvkm_vmm_unmap_region(vmm, next);
        } while ((next = node(vma, next)) && next->part);

        if (vma->sparse && !vma->mapref) {
                /* Sparse region that was allocated with a fixed page size,
                 * meaning all relevant PTEs were referenced once when the
                 * region was allocated, and remained that way, regardless
                 * of whether memory was mapped into it afterwards.
                 *
                 * The process of unmapping, unsparsing, and dereferencing
                 * PTEs can be done in a single page tree walk.
                 */
                nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
        } else
        if (vma->sparse) {
                /* Sparse region that wasn't allocated with a fixed page size,
                 * PTE references were taken both at allocation time (to make
                 * the GPU see the region as sparse), and when mapping memory
                 * into the region afterwards.
                 *
                 * The latter was handled above, and the remaining references
                 * are dealt with here.
                 */
                nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
        }

        /* Remove VMA from the list of allocated nodes. */
        nvkm_vmm_node_remove(vmm, vma);

        /* Merge VMA back into the free list. */
        vma->page = NVKM_VMA_PAGE_NONE;
        vma->refd = NVKM_VMA_PAGE_NONE;

        nvkm_vmm_put_region(vmm, vma);

nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
        struct nvkm_vma *vma = *pvma;

        mutex_lock(&vmm->mutex);
        nvkm_vmm_put_locked(vmm, vma);
        mutex_unlock(&vmm->mutex);
nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
                    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
        const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
        struct rb_node *node = NULL, *temp;
        struct nvkm_vma *vma = NULL, *tmp;

        VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
                       "shift: %d align: %d size: %016llx",
                  getref, mapref, sparse, shift, align, size);

        /* Zero-sized, or lazily-allocated sparse VMAs, make no sense. */
        if (unlikely(!size || (!getref && !mapref && sparse))) {
                VMM_DEBUG(vmm, "args %016llx %d %d %d",
                          size, getref, mapref, sparse);

        /* Tesla-class GPUs can only select page size per-PDE, which means
         * we're required to know the mapping granularity up-front to find
         * a suitable region of address-space.
         *
         * The same goes if we're requesting up-front allocation of PTEs.
         */
        if (unlikely((getref || vmm->func->page_block) && !shift)) {
                VMM_DEBUG(vmm, "page size required: %d %016llx",
                          getref, vmm->func->page_block);

        /* If a specific page size was requested, determine its index and
         * make sure the requested size is a multiple of the page size.
         */
        if (shift) {
                for (page = vmm->func->page; page->shift; page++) {
                        if (shift == page->shift)
                                break;
                }

                if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
                        VMM_DEBUG(vmm, "page %d %016llx", shift, size);

                align = max_t(u8, align, shift);
        } else {
                align = max_t(u8, align, 12);
        }

        /* Locate smallest block that can possibly satisfy the allocation. */
        temp = vmm->free.rb_node;
        while (temp) {
                struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
                if (this->size < size) {
                        temp = temp->rb_right;
                } else {
                        node = temp;
                        temp = temp->rb_left;
                }
        }

        if (unlikely(!node))

        /* Take into account alignment restrictions, trying larger blocks
         * in turn until we find a suitable free block.
         */
        do {
                struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
                struct nvkm_vma *prev = node(this, prev);
                struct nvkm_vma *next = node(this, next);
                const int p = page - vmm->func->page;

                addr = this->addr;
                if (vmm->func->page_block && prev && prev->page != p)
                        addr = ALIGN(addr, vmm->func->page_block);
                addr = ALIGN(addr, 1ULL << align);

                tail = this->addr + this->size;
                if (vmm->func->page_block && next && next->page != p)
                        tail = ALIGN_DOWN(tail, vmm->func->page_block);

                if (addr <= tail && tail - addr >= size) {
                        nvkm_vmm_free_remove(vmm, this);

        } while ((node = rb_next(node)));
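
        /* Example of the walk above (illustrative numbers): given a free
         * block at 0x10f000 with size 0x21000 and align = 16, addr rounds
         * up to 0x110000 and tail stays at 0x130000, leaving 0x20000
         * usable bytes; a request for more than that rejects this block
         * and rb_next() tries the next-larger candidate in the free tree.
         */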
        /* If the VMA we found isn't already exactly the requested size,
         * it needs to be split, and the remaining free blocks returned.
         */
        if (addr != vma->addr) {
                if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
                        nvkm_vmm_put_region(vmm, vma);

                nvkm_vmm_free_insert(vmm, vma);

        if (size != vma->size) {
                if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
                        nvkm_vmm_put_region(vmm, vma);

                nvkm_vmm_free_insert(vmm, tmp);

        /* Pre-allocate page tables and/or setup sparse mappings. */
        if (sparse && getref)
                ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
        else if (sparse)
                ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
        else if (getref)
                ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);

                nvkm_vmm_put_region(vmm, vma);

        vma->mapref = mapref && !getref;
        vma->sparse = sparse;
        vma->page = page - vmm->func->page;
        vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;

        nvkm_vmm_node_insert(vmm, vma);

nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
        mutex_unlock(&vmm->mutex);
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
        if (inst && vmm && vmm->func->part) {
                mutex_lock(&vmm->mutex);
                vmm->func->part(vmm, inst);
                mutex_unlock(&vmm->mutex);

nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
        if (vmm->func->join) {
                mutex_lock(&vmm->mutex);
                ret = vmm->func->join(vmm, inst);
                mutex_unlock(&vmm->mutex);

nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
        nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);

nvkm_vmm_boot(struct nvkm_vmm *vmm)
        const struct nvkm_vmm_page *page = vmm->func->page;
        const u64 limit = vmm->limit - vmm->start;

        while (page[1].shift)
                page++;

        ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);

        nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
                      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
        vmm->bootstrapped = true;

nvkm_vmm_del(struct kref *kref)
        struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);

nvkm_vmm_unref(struct nvkm_vmm **pvmm)
        struct nvkm_vmm *vmm = *pvmm;
                kref_put(&vmm->kref, nvkm_vmm_del);

nvkm_vmm_ref(struct nvkm_vmm *vmm)
                kref_get(&vmm->kref);

nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
             u32 argc, struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
        struct nvkm_mmu *mmu = device->mmu;
        struct nvkm_vmm *vmm = NULL;

        ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
                nvkm_vmm_unref(&vmm);