asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/etnaviv: remove cycling through MMU address space
authorLucas Stach <l.stach@pengutronix.de>
Fri, 9 Mar 2018 11:53:34 +0000 (12:53 +0100)
committerLucas Stach <l.stach@pengutronix.de>
Fri, 18 May 2018 13:27:56 +0000 (15:27 +0200)
This was useful on MMUv1 GPUs, which don't generate proper faults,
back when the GPU write caches weren't fully understood and were not
properly handled by the kernel driver. As this has been fixed for quite
some time, the cycling through the MMU address space needlessly spreads
out the MMU mappings.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h

index 49e049713a529cb9088d024e2b7925512b7c4ea5..e8e8c4fe3242fdda529a94b4e566f7f3ea73f752 100644 (file)
@@ -162,22 +162,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                bool found;
 
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-                                                 size, 0, 0,
-                                                 mmu->last_iova, U64_MAX,
-                                                 mode);
+                                                 size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;
 
-               /*
-                * If we did not search from the start of the MMU region,
-                * try again in case there are free slots.
-                */
-               if (mmu->last_iova) {
-                       mmu->last_iova = 0;
-                       mmu->need_flush = true;
-                       continue;
-               }
-
                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 
@@ -274,7 +262,6 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        if (ret < 0)
                goto unlock;
 
-       mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
@@ -381,7 +368,6 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
-               mmu->last_iova = vram_node->start + size;
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);
 
index ab603f5166b10b5519cbe4a1272c6d430ea5f109..a339ec5798ff58c20b3caf34e4315626a538edac 100644 (file)
@@ -59,7 +59,6 @@ struct etnaviv_iommu {
        struct mutex lock;
        struct list_head mappings;
        struct drm_mm mm;
-       u32 last_iova;
        bool need_flush;
 };