drm/nouveau/mmu/nv44: implement new vmm backend
author     Ben Skeggs <bskeggs@redhat.com>
           Tue, 31 Oct 2017 17:56:19 +0000 (03:56 +1000)
committer  Ben Skeggs <bskeggs@redhat.com>
           Thu, 2 Nov 2017 03:32:28 +0000 (13:32 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
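The patch below removes the nv44 GART fill/map/unmap and TLB-flush callbacks from the MMU subdev (nv44.c) and reimplements them as page-table .dma/.sgl/.unmap callbacks and a .flush hook in the new per-VMM backend (vmmnv44.c). The PTE layout is unchanged: four entries share each 16-byte group, every entry is a 27-bit page-frame number (the DMA address shifted right by 12, which together with the 12-bit page offset matches .dma_bits = 39) spread across the group's four 32-bit words, and the map/fill paths always set bit 30 (0x40000000) of the last word. The sketch below only illustrates that packing and is not part of the patch; the helper name and plain uint32_t buffers are hypothetical.

/*
 * Illustration only -- not from the patch.  Packs four nv44 GART PTEs
 * into one 16-byte group, mirroring the shifts in nv44_vmm_pgt_pte()
 * and nv44_vmm_pgt_fill().  Names and buffers here are hypothetical.
 */
#include <stdint.h>

static void
nv44_pack_pte_group(uint32_t word[4], const uint32_t pfn[4])
{
	/* pfn[i] is the 27-bit page-frame number (DMA address >> 12). */
	word[0] = (pfn[0] & 0x07ffffff) | (pfn[1] << 27);
	word[1] = (pfn[1] >>  5)        | (pfn[2] << 22);
	word[2] = (pfn[2] >> 10)        | (pfn[3] << 17);
	word[3] = (pfn[3] >> 15)        | 0x40000000; /* bit 30 set on write */
}

Splitting each PFN across word boundaries like this is why the driver must read-modify-write a whole group (the _fill path) whenever a mapping starts or ends at a PTE index that is not a multiple of four.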

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 48ca0cdf2acfa55e3acfdb75e37acc669849d5c0..2072139bff4dc0e9219b4bfbec20132ccb2cbbd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
 #include "vmm.h"
 
 #include <core/option.h>
-#include <subdev/timer.h>
 
 #include <nvif/class.h>
 
 #define NV44_GART_SIZE (512 * 1024 * 1024)
-#define NV44_GART_PAGE (  4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
-            dma_addr_t *list, u32 pte, u32 cnt)
-{
-       u32 base = (pte << 2) & ~0x0000000f;
-       u32 tmp[4];
-
-       tmp[0] = nvkm_ro32(pgt, base + 0x0);
-       tmp[1] = nvkm_ro32(pgt, base + 0x4);
-       tmp[2] = nvkm_ro32(pgt, base + 0x8);
-       tmp[3] = nvkm_ro32(pgt, base + 0xc);
-
-       while (cnt--) {
-               u32 addr = list ? (*list++ >> 12) : (null >> 12);
-               switch (pte++ & 0x3) {
-               case 0:
-                       tmp[0] &= ~0x07ffffff;
-                       tmp[0] |= addr;
-                       break;
-               case 1:
-                       tmp[0] &= ~0xf8000000;
-                       tmp[0] |= addr << 27;
-                       tmp[1] &= ~0x003fffff;
-                       tmp[1] |= addr >> 5;
-                       break;
-               case 2:
-                       tmp[1] &= ~0xffc00000;
-                       tmp[1] |= addr << 22;
-                       tmp[2] &= ~0x0001ffff;
-                       tmp[2] |= addr >> 10;
-                       break;
-               case 3:
-                       tmp[2] &= ~0xfffe0000;
-                       tmp[2] |= addr << 17;
-                       tmp[3] &= ~0x00000fff;
-                       tmp[3] |= addr >> 15;
-                       break;
-               }
-       }
-
-       nvkm_wo32(pgt, base + 0x0, tmp[0]);
-       nvkm_wo32(pgt, base + 0x4, tmp[1]);
-       nvkm_wo32(pgt, base + 0x8, tmp[2]);
-       nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
-}
-
-static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-              struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-       u32 tmp[4];
-       int i;
-
-       nvkm_kmap(pgt);
-       if (pte & 3) {
-               u32  max = 4 - (pte & 3);
-               u32 part = (cnt > max) ? max : cnt;
-               nv44_vm_fill(pgt, vma->vm->null, list, pte, part);
-               pte  += part;
-               list += part;
-               cnt  -= part;
-       }
-
-       while (cnt >= 4) {
-               for (i = 0; i < 4; i++)
-                       tmp[i] = *list++ >> 12;
-               nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
-               nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
-               nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-               nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
-               cnt -= 4;
-       }
-
-       if (cnt)
-               nv44_vm_fill(pgt, vma->vm->null, list, pte, cnt);
-       nvkm_done(pgt);
-}
-
-static void
-nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-       nvkm_kmap(pgt);
-       if (pte & 3) {
-               u32  max = 4 - (pte & 3);
-               u32 part = (cnt > max) ? max : cnt;
-               nv44_vm_fill(pgt, vma->vm->null, NULL, pte, part);
-               pte  += part;
-               cnt  -= part;
-       }
-
-       while (cnt >= 4) {
-               nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-               nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-               nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-               nvkm_wo32(pgt, pte++ * 4, 0x00000000);
-               cnt -= 4;
-       }
-
-       if (cnt)
-               nv44_vm_fill(pgt, vma->vm->null, NULL, pte, cnt);
-       nvkm_done(pgt);
-}
-
-static void
-nv44_vm_flush(struct nvkm_vm *vm)
-{
-       struct nvkm_device *device = vm->mmu->subdev.device;
-       nvkm_wr32(device, 0x100814, vm->mmu->limit - NV44_GART_PAGE);
-       nvkm_wr32(device, 0x100808, 0x00000020);
-       nvkm_msec(device, 2000,
-               if (nvkm_rd32(device, 0x100808) & 0x00000001)
-                       break;
-       );
-       nvkm_wr32(device, 0x100808, 0x00000000);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv44_mmu_oneinit(struct nvkm_mmu *mmu)
-{
-       mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
-       mmu->vmm->pgt[0].refcount[0] = 1;
-       return 0;
-}
 
 static void
 nv44_mmu_init(struct nvkm_mmu *mmu)
 {
        struct nvkm_device *device = mmu->subdev.device;
-       struct nvkm_memory *gart = mmu->vmm->pgt[0].mem[0];
+       struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
        u32 addr;
 
        /* calculate vram address of this PRAMIN block, object must be
@@ -175,11 +41,11 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
         * of 512KiB for this to work correctly
         */
        addr  = nvkm_rd32(device, 0x10020c);
-       addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+       addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
 
        nvkm_wr32(device, 0x100850, 0x80000000);
        nvkm_wr32(device, 0x100818, mmu->vmm->null);
-       nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+       nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
        nvkm_wr32(device, 0x100850, 0x00008000);
        nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
        nvkm_wr32(device, 0x100820, 0x00000000);
@@ -189,16 +55,12 @@ nv44_mmu_init(struct nvkm_mmu *mmu)
 
 static const struct nvkm_mmu_func
 nv44_mmu = {
-       .oneinit = nv44_mmu_oneinit,
        .init = nv44_mmu_init,
        .limit = NV44_GART_SIZE,
        .dma_bits = 39,
        .pgt_bits = 32 - 12,
        .spg_shift = 12,
        .lpg_shift = 12,
-       .map_sg = nv44_vm_map_sg,
-       .unmap = nv44_vm_unmap,
-       .flush = nv44_vm_flush,
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 };
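
A side note on the nv44.c hunk above: nv44_mmu_init() now programs 0x100804 with (nvkm_memory_size(pt) / 4) * 4096, i.e. the number of 4-byte PTEs in the GART page table multiplied by the 4 KiB page size, rather than the hard-coded NV44_GART_SIZE. For the 512 MiB GART with 4 KiB pages described by the removed constants, the page table holds 131072 PTEs (512 KiB), so both expressions produce the same value. A minimal standalone check (not part of the patch; NV44_GART_PT_LEN is a hypothetical name for the page-table size in bytes):

/* Old constants from nv44.c, repeated for the check. */
#define NV44_GART_SIZE   (512 * 1024 * 1024)
#define NV44_GART_PAGE   (  4 * 1024)
/* One 4-byte PTE per 4 KiB page. */
#define NV44_GART_PT_LEN (NV44_GART_SIZE / NV44_GART_PAGE * 4)

_Static_assert((NV44_GART_PT_LEN / 4) * 4096 == NV44_GART_SIZE,
	       "PTE count times 4 KiB page equals the 512 MiB GART");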
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
index 2b5704269ac99792b25eb106fdbda536f9e41820..b834e43523343772cea882c806bd5151d06f1986 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
  */
 #include "vmm.h"
 
+#include <subdev/timer.h>
+
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                 dma_addr_t *list, u32 ptei, u32 ptes)
+{
+       u32 pteo = (ptei << 2) & ~0x0000000f;
+       u32 tmp[4];
+
+       tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+       tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+       tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+       tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+       while (ptes--) {
+               u32 addr = (list ? *list++ : vmm->null) >> 12;
+               switch (ptei++ & 0x3) {
+               case 0:
+                       tmp[0] &= ~0x07ffffff;
+                       tmp[0] |= addr;
+                       break;
+               case 1:
+                       tmp[0] &= ~0xf8000000;
+                       tmp[0] |= addr << 27;
+                       tmp[1] &= ~0x003fffff;
+                       tmp[1] |= addr >> 5;
+                       break;
+               case 2:
+                       tmp[1] &= ~0xffc00000;
+                       tmp[1] |= addr << 22;
+                       tmp[2] &= ~0x0001ffff;
+                       tmp[2] |= addr >> 10;
+                       break;
+               case 3:
+                       tmp[2] &= ~0xfffe0000;
+                       tmp[2] |= addr << 17;
+                       tmp[3] &= ~0x00000fff;
+                       tmp[3] |= addr >> 15;
+                       break;
+               }
+       }
+
+       VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+       VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+       VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+       VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+       dma_addr_t tmp[4], i;
+
+       if (ptei & 3) {
+               const u32 pten = min(ptes, 4 - (ptei & 3));
+               for (i = 0; i < pten; i++, addr += 0x1000)
+                       tmp[i] = addr;
+               nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+               ptei += pten;
+               ptes -= pten;
+       }
+
+       while (ptes >= 4) {
+               for (i = 0; i < 4; i++, addr += 0x1000)
+                       tmp[i] = addr >> 12;
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+               ptes -= 4;
+       }
+
+       if (ptes) {
+               for (i = 0; i < ptes; i++, addr += 0x1000)
+                       tmp[i] = addr;
+               nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+       }
+}
+
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+       VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+       nvkm_kmap(pt->memory);
+       if (ptei & 3) {
+               const u32 pten = min(ptes, 4 - (ptei & 3));
+               nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+               ptei += pten;
+               ptes -= pten;
+               map->dma += pten;
+       }
+
+       while (ptes >= 4) {
+               u32 tmp[4], i;
+               for (i = 0; i < 4; i++)
+                       tmp[i] = *map->dma++ >> 12;
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+               VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+               ptes -= 4;
+       }
+
+       if (ptes) {
+               nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+               map->dma += ptes;
+       }
+       nvkm_done(pt->memory);
+#else
+       VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+                  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+       nvkm_kmap(pt->memory);
+       if (ptei & 3) {
+               const u32 pten = min(ptes, 4 - (ptei & 3));
+               nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+               ptei += pten;
+               ptes -= pten;
+       }
+
+       while (ptes > 4) {
+               VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+               VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+               VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+               VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+               ptes -= 4;
+       }
+
+       if (ptes)
+               nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+       nvkm_done(pt->memory);
+}
+
 static const struct nvkm_vmm_desc_func
 nv44_vmm_desc_pgt = {
+       .unmap = nv44_vmm_pgt_unmap,
+       .dma = nv44_vmm_pgt_dma,
+       .sgl = nv44_vmm_pgt_sgl,
 };
 
 static const struct nvkm_vmm_desc
@@ -31,8 +181,23 @@ nv44_vmm_desc_12[] = {
        {}
 };
 
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+       struct nvkm_device *device = vmm->mmu->subdev.device;
+       nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+       nvkm_wr32(device, 0x100808, 0x000000020);
+       nvkm_msec(device, 2000,
+               if (nvkm_rd32(device, 0x100808) & 0x00000001)
+                       break;
+       );
+       nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
 static const struct nvkm_vmm_func
 nv44_vmm = {
+       .valid = nv04_vmm_valid,
+       .flush = nv44_vmm_flush,
        .page = {
                { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
                {}