asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/nouveau: consolidate handling of dma mask
author Ben Skeggs <bskeggs@redhat.com>
Tue, 31 Oct 2017 17:56:19 +0000 (03:56 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Thu, 2 Nov 2017 03:32:32 +0000 (13:32 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c

index 8c4f45ea62341f53cb68701ef534f88e7a77b7ed..3211f78da4dd5525d33e0b44e95a5c58ad839caf 100644 (file)
@@ -253,7 +253,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct drm_device *dev = drm->dev;
-       u8 bits;
        int ret;
 
        if (pci && pci->agp.bridge) {
@@ -263,34 +262,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                drm->agp.cma = pci->agp.cma;
        }
 
-       bits = nvxx_mmu(&drm->client.device)->dma_bits;
-       if (nvxx_device(&drm->client.device)->func->pci) {
-               if (drm->agp.bridge)
-                       bits = 32;
-       } else if (device->func->tegra) {
-               struct nvkm_device_tegra *tegra = device->func->tegra(device);
-
-               /*
-                * If the platform can use a IOMMU, then the addressable DMA
-                * space is constrained by the IOMMU bit
-                */
-               if (tegra->func->iommu_bit)
-                       bits = min(bits, tegra->func->iommu_bit);
-
-       }
-
-       ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-       if (ret && bits != 32) {
-               bits = 32;
-               ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-       }
-       if (ret)
-               return ret;
-
-       ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
-       if (ret)
-               dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
-
        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;
@@ -300,7 +271,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                                  &nouveau_bo_driver,
                                  dev->anon_inode->i_mapping,
                                  DRM_FILE_PAGE_OFFSET,
-                                 bits <= 32 ? true : false);
+                                 drm->client.mmu.dmabits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
index 74a1ffa425f73d00e832b4f44e803db63300ef68..f302d2b5782a1e2e9c9a31f9ae378a99f6736571 100644 (file)
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
        const struct nvkm_device_pci_vendor *pciv;
        const char *name = NULL;
        struct nvkm_device_pci *pdev;
-       int ret;
+       int ret, bits;
 
        ret = pci_enable_device(pci_dev);
        if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
        if (ret)
                return ret;
 
-       /*
-        * Set a preliminary DMA mask based on the .dma_bits member of the
-        * MMU subdevice. This allows other subdevices to create DMA mappings
-        * in their init() or oneinit() methods, which may be called before the
-        * TTM layer sets the DMA mask definitively.
-        * This is necessary for platforms where the default DMA mask of 32
-        * does not cover any system memory, i.e., when all RAM is > 4 GB.
-        */
-       if (pdev->device.mmu)
-               dma_set_mask_and_coherent(&pci_dev->dev,
-                               DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+       /* Set DMA mask based on capabilities reported by the MMU subdev. */
+       if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+               bits = pdev->device.mmu->dma_bits;
+       else
+               bits = 32;
+
+       ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+       if (ret && bits != 32) {
+               dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+               pdev->device.mmu->dma_bits = 32;
+       }
 
        return 0;
 }
index d35a32e168d6e92754a3d84902e2e051703c3b85..78597da6313ade568dc1ddb7a19582c3a778a091 100644 (file)
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 
        /**
         * The IOMMU bit defines the upper limit of the GPU-addressable space.
-        * This will be refined in nouveau_ttm_init but we need to do it early
-        * for instmem to behave properly
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)