drm/nouveau/mmu: add a privileged method to directly manage PTEs
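
In outline, the diff below threads a new "pfn" flag through the VMM iterator's REF_PTES callbacks and the PTE unmap helpers, so PTEs installed through the direct PFN path can be cleared, flushed and dma-unmapped correctly, and it adds nvkm_vmm_pfn_map()/nvkm_vmm_pfn_unmap() as the entry points for the privileged method. The sketch below only illustrates the reshaped callback type; the typedef name is invented for the example and the iterator struct is left opaque, which is not how the kernel source spells it.

/* Illustrative sketch of the callback shape after this patch; the name
 * "ref_ptes_fn" is made up for the example, the kernel passes the
 * function pointer directly to nvkm_vmm_iter().
 */
#include <stdbool.h>
#include <stdint.h>

struct nvkm_vmm_iter;	/* opaque for the purpose of this sketch */

typedef bool (*ref_ptes_fn)(struct nvkm_vmm_iter *it, bool pfn,
			    uint32_t ptei, uint32_t ptes);
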
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 69b61e799fd404a594c371954b74baa3b4ae12b2..fa93f964e6a4db0b5a8320cfc135921e33f4521b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -255,11 +255,23 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
 }
 
 static bool
-nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
        struct nvkm_vmm_pt *pgt = it->pt[0];
+       bool dma;
+
+       if (pfn) {
+               /* Need to clear PTE valid bits before we dma_unmap_page(). */
+               dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
+               if (dma) {
+                       /* GPU may have cached the PT, flush before unmap. */
+                       nvkm_vmm_flush_mark(it);
+                       nvkm_vmm_flush(it);
+                       desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
+               }
+       }
 
        /* Drop PTE references. */
        pgt->refs[type] -= ptes;
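
A standalone illustration of the ordering the hunk above enforces for PFN mappings (a userspace sketch with stub functions, not kernel code): the PTE valid bits are cleared first, the GPU's page-table caches are flushed, and only then are the backing pages dma-unmapped, so the GPU can never walk a still-valid PTE that points at an already-unmapped DMA address.

/* Sketch of the control flow in the pfn branch of nvkm_vmm_unref_ptes():
 * only flush and unmap when pfn_clear() reports DMA-mapped pages present.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pfn_clear(void)  { puts("clear PTE valid bits"); return true; }
static void flush(void)      { puts("flush GPU page-table caches"); }
static void pfn_unmap(void)  { puts("dma_unmap_page() the backing pages"); }

static void unref_pfn_ptes(void)
{
	if (pfn_clear()) {
		flush();
		pfn_unmap();
	}
}

int main(void) { unref_pfn_ptes(); return 0; }
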
@@ -349,7 +361,7 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
 }
 
 static bool
-nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
@@ -379,7 +391,7 @@ nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
 }
 
 static bool
-nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
        struct nvkm_vmm_pt *pt = it->pt[0];
        if (it->desc->type == PGD)
@@ -387,14 +399,14 @@ nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
        else
        if (it->desc->type == LPT)
                memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
-       return nvkm_vmm_unref_ptes(it, ptei, ptes);
+       return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
 }
 
 static bool
-nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
        nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
-       return nvkm_vmm_ref_ptes(it, ptei, ptes);
+       return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
 }
 
 static bool
@@ -487,8 +499,8 @@ nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
 
 static inline u64
 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-             u64 addr, u64 size, const char *name, bool ref,
-             bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+             u64 addr, u64 size, const char *name, bool ref, bool pfn,
+             bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
              nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
              nvkm_vmm_pxe_func CLR_PTES)
 {
@@ -548,7 +560,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                }
 
                /* Handle PTE updates. */
-               if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+               if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
                        struct nvkm_mmu_pt *pt = pgt->pt[type];
                        if (MAP_PTES || CLR_PTES) {
                                if (MAP_PTES)
@@ -590,7 +602,7 @@ static void
 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                         u64 addr, u64 size)
 {
-       nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+       nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
                      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
                      page->desc->func->invalid ?
                      page->desc->func->invalid : page->desc->func->unmap);
@@ -602,8 +614,8 @@ nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 {
        if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
                u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
-                                        true, nvkm_vmm_sparse_ref_ptes, NULL,
-                                        NULL, page->desc->func->sparse);
+                                        true, false, nvkm_vmm_sparse_ref_ptes,
+                                        NULL, NULL, page->desc->func->sparse);
                if (fail != ~0ULL) {
                        if ((size = fail - addr))
                                nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
@@ -666,11 +678,11 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
 
 static void
 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-                       u64 addr, u64 size, bool sparse)
+                       u64 addr, u64 size, bool sparse, bool pfn)
 {
        const struct nvkm_vmm_desc_func *func = page->desc->func;
        nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
-                     false, nvkm_vmm_unref_ptes, NULL, NULL,
+                     false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
                      sparse ? func->sparse : func->invalid ? func->invalid :
                                                              func->unmap);
 }
@@ -681,10 +693,10 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                      nvkm_vmm_pte_func func)
 {
        u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
-                                nvkm_vmm_ref_ptes, func, map, NULL);
+                                false, nvkm_vmm_ref_ptes, func, map, NULL);
        if (fail != ~0ULL) {
                if ((size = fail - addr))
-                       nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+                       nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
                return -ENOMEM;
        }
        return 0;
@@ -692,10 +704,11 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 
 static void
 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-                   u64 addr, u64 size, bool sparse)
+                   u64 addr, u64 size, bool sparse, bool pfn)
 {
        const struct nvkm_vmm_desc_func *func = page->desc->func;
-       nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+       nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
+                     NULL, NULL, NULL,
                      sparse ? func->sparse : func->invalid ? func->invalid :
                                                              func->unmap);
 }
@@ -705,7 +718,7 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size, struct nvkm_vmm_map *map,
                  nvkm_vmm_pte_func func)
 {
-       nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+       nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
                      NULL, func, map, NULL);
 }
 
@@ -713,7 +726,7 @@ static void
 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size)
 {
-       nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+       nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
                      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
 }
 
@@ -721,7 +734,7 @@ static int
 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
                  u64 addr, u64 size)
 {
-       u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+       u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
                                 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
        if (fail != ~0ULL) {
                if (fail != addr)
@@ -935,12 +948,41 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
        return vma;
 }
 
+static void
+nvkm_vma_dump(struct nvkm_vma *vma)
+{
+       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+              vma->addr, (u64)vma->size,
+              vma->used ? '-' : 'F',
+              vma->mapref ? 'R' : '-',
+              vma->sparse ? 'S' : '-',
+              vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
+              vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
+              vma->part ? 'P' : '-',
+              vma->user ? 'U' : '-',
+              vma->busy ? 'B' : '-',
+              vma->mapped ? 'M' : '-',
+              vma->memory);
+}
+
+static void
+nvkm_vmm_dump(struct nvkm_vmm *vmm)
+{
+       struct nvkm_vma *vma;
+       list_for_each_entry(vma, &vmm->list, head) {
+               nvkm_vma_dump(vma);
+       }
+}
+
 static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
        struct nvkm_vma *vma;
        struct rb_node *node;
 
+       if (0)
+               nvkm_vmm_dump(vmm);
+
        while ((node = rb_first(&vmm->root))) {
                struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
                nvkm_vmm_put(vmm, &vma);
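
For reference, a minimal userspace sketch of the per-VMA line that nvkm_vma_dump() above produces: address and size, then one character per flag, with '-' marking a clear flag. The values here are invented for illustration, and -1 stands in for NVKM_VMA_PAGE_NONE.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x100000, size = 0x4000;
	bool used = true, mapref = true, sparse = false;
	int page = -1, refd = 0;	/* -1 = NVKM_VMA_PAGE_NONE */
	bool part = false, user = true, busy = false, mapped = true;

	printf("%016llx %016llx %c%c%c%c%c%c%c%c%c\n",
	       (unsigned long long)addr, (unsigned long long)size,
	       used ? '-' : 'F',	/* '-' = in use, 'F' = free */
	       mapref ? 'R' : '-',
	       sparse ? 'S' : '-',
	       page >= 0 ? '0' + page : '-',
	       refd >= 0 ? '0' + refd : '-',
	       part ? 'P' : '-',
	       user ? 'U' : '-',
	       busy ? 'B' : '-',
	       mapped ? 'M' : '-');
	return 0;
}
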
@@ -1105,33 +1147,216 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
        return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
 }
 
+static struct nvkm_vma *
+nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+                        u64 addr, u64 size, u8 page, bool map)
+{
+       struct nvkm_vma *prev = NULL;
+       struct nvkm_vma *next = NULL;
+
+       if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
+               if (prev->memory || prev->mapped != map)
+                       prev = NULL;
+       }
+
+       if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
+               if (!next->part ||
+                   next->memory || next->mapped != map)
+                       next = NULL;
+       }
+
+       if (prev || next)
+               return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
+       return nvkm_vmm_node_split(vmm, vma, addr, size);
+}
+
+int
+nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+       struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
+       struct nvkm_vma *next;
+       u64 limit = addr + size;
+       u64 start = addr;
+
+       if (!vma)
+               return -EINVAL;
+
+       do {
+               if (!vma->mapped || vma->memory)
+                       continue;
+
+               size = min(limit - start, vma->size - (start - vma->addr));
+
+               nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
+                                       start, size, false, true);
+
+               next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
+               if (!WARN_ON(!next)) {
+                       vma = next;
+                       vma->refd = NVKM_VMA_PAGE_NONE;
+                       vma->mapped = false;
+               }
+       } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
+
+       return 0;
+}
+
+/*TODO:
+ * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
+ *   with inside HMM, which would be a lot nicer for us to deal with.
+ * - Multiple page sizes (particularly for huge page support).
+ * - Support for systems without a 4KiB page size.
+ */
+int
+nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
+{
+       const struct nvkm_vmm_page *page = vmm->func->page;
+       struct nvkm_vma *vma, *tmp;
+       u64 limit = addr + size;
+       u64 start = addr;
+       int pm = size >> shift;
+       int pi = 0;
+
+       /* Only support mapping where the page size of the incoming page
+        * array matches a page size available for direct mapping.
+        */
+       while (page->shift && page->shift != shift &&
+              page->desc->func->pfn == NULL)
+               page++;
+
+       if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
+                           !IS_ALIGNED(size, 1ULL << shift) ||
+           addr + size < addr || addr + size > vmm->limit) {
+               VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
+                         shift, page->shift, addr, size);
+               return -EINVAL;
+       }
+
+       if (!(vma = nvkm_vmm_node_search(vmm, addr)))
+               return -ENOENT;
+
+       do {
+               bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
+               bool mapped = vma->mapped;
+               u64 size = limit - start;
+               u64 addr = start;
+               int pn, ret = 0;
+
+               /* Narrow the operation window to cover a single action (page
+                * should be mapped or not) within a single VMA.
+                */
+               for (pn = 0; pi + pn < pm; pn++) {
+                       if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
+                               break;
+               }
+               size = min_t(u64, size, pn << page->shift);
+               size = min_t(u64, size, vma->size + vma->addr - addr);
+
+               /* Reject any operation to unmanaged regions, and areas that
+                * have nvkm_memory objects mapped in them already.
+                */
+               if (!vma->mapref || vma->memory) {
+                       ret = -EINVAL;
+                       goto next;
+               }
+
+               /* In order to both properly refcount GPU page tables, and
+                * prevent "normal" mappings and these direct mappings from
+                * interfering with each other, we need to track contiguous
+                * ranges that have been mapped with this interface.
+                *
+                * Here we attempt to either split an existing VMA so we're
+                * able to flag the region as either unmapped/mapped, or to
+                * merge with adjacent VMAs that are already compatible.
+                *
+                * If the region is already compatible, nothing is required.
+                */
+               if (map != mapped) {
+                       tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
+                                                      page -
+                                                      vmm->func->page, map);
+                       if (WARN_ON(!tmp)) {
+                               ret = -ENOMEM;
+                               goto next;
+                       }
+
+                       if ((tmp->mapped = map))
+                               tmp->refd = page - vmm->func->page;
+                       else
+                               tmp->refd = NVKM_VMA_PAGE_NONE;
+                       vma = tmp;
+               }
+
+               /* Update HW page tables. */
+               if (map) {
+                       struct nvkm_vmm_map args;
+                       args.page = page;
+                       args.pfn = &pfn[pi];
+
+                       if (!mapped) {
+                               ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
+                                                           size, &args, page->
+                                                           desc->func->pfn);
+                       } else {
+                               nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
+                                                 page->desc->func->pfn);
+                       }
+               } else {
+                       if (mapped) {
+                               nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
+                                                       false, true);
+                       }
+               }
+
+next:
+               /* Iterate to next operation. */
+               if (vma->addr + vma->size == addr + size)
+                       vma = node(vma, next);
+               start += size;
+
+               if (ret) {
+                       /* Failure is signalled by clearing the valid bit on
+                        * any PFN that couldn't be modified as requested.
+                        */
+                       while (size) {
+                               pfn[pi++] = NVKM_VMM_PFN_NONE;
+                               size -= 1 << page->shift;
+                       }
+               } else {
+                       pi += size >> page->shift;
+               }
+       } while (vma && start < limit);
+
+       return 0;
+}
+
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-       struct nvkm_vma *next = node(vma, next);
        struct nvkm_vma *prev = NULL;
+       struct nvkm_vma *next;
 
        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
        vma->mapped = false;
 
-       if (!vma->part || ((prev = node(vma, prev)), prev->mapped))
+       if (vma->part && (prev = node(vma, prev)) && prev->mapped)
                prev = NULL;
-       if (!next->part || next->mapped)
+       if ((next = node(vma, next)) && (!next->part || next->mapped))
                next = NULL;
        nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
-nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
 {
        const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
 
        if (vma->mapref) {
-               nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+               nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
                vma->refd = NVKM_VMA_PAGE_NONE;
        } else {
-               nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+               nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
        }
 
        nvkm_vmm_unmap_region(vmm, vma);
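
The loop in nvkm_vmm_pfn_map() above narrows each pass to a run of PFN-array entries that share the same NVKM_VMM_PFN_V state, so one pass maps a contiguous valid run and the next clears an invalid one. Below is a small self-contained sketch of that run scanning, with an invented PFN_V bit and made-up array values rather than the kernel's definitions. On failure the patch instead overwrites the entries of the failed run with NVKM_VMM_PFN_NONE, which is how callers learn which pages could not be handled.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFN_V 1ULL	/* stand-in for NVKM_VMM_PFN_V */

int main(void)
{
	uint64_t pfn[] = { 0x1000 | PFN_V, 0x2000 | PFN_V, 0, 0, 0x5000 | PFN_V };
	int pm = sizeof(pfn) / sizeof(pfn[0]);
	int pi = 0;

	while (pi < pm) {
		bool map = pfn[pi] & PFN_V;
		int pn;

		/* Same scan as the patch: stop at the first entry whose
		 * valid bit differs from the run being built. */
		for (pn = 0; pi + pn < pm; pn++) {
			if (map != !!(pfn[pi + pn] & PFN_V))
				break;
		}

		printf("%s %d page(s) starting at index %d\n",
		       map ? "map" : "unmap", pn, pi);
		pi += pn;
	}
	return 0;
}
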
@@ -1142,7 +1367,7 @@ nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
        if (vma->memory) {
                mutex_lock(&vmm->mutex);
-               nvkm_vmm_unmap_locked(vmm, vma);
+               nvkm_vmm_unmap_locked(vmm, vma, false);
                mutex_unlock(&vmm->mutex);
        }
 }
@@ -1341,7 +1566,8 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
                                 * the page tree.
                                 */
                                nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
-                                                       size, vma->sparse);
+                                                       size, vma->sparse,
+                                                       !mem);
                        } else
                        if (refd != NVKM_VMA_PAGE_NONE) {
                                /* Drop allocation-time PTE references. */
@@ -1577,7 +1803,7 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 }
 
 static bool
-nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
        const struct nvkm_vmm_desc *desc = it->desc;
        const int type = desc->type == SPT;
@@ -1599,7 +1825,7 @@ nvkm_vmm_boot(struct nvkm_vmm *vmm)
        if (ret)
                return ret;
 
-       nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+       nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
                      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
        vmm->bootstrapped = true;
        return 0;