// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
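/*
 * Tear down a mapping in the GPU MMU domain one SZ_4K page at a time via
 * the domain's unmap callback. Both iova and size must be page aligned;
 * the loop stops early if the domain fails to unmap a page.
 */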
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

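/*
 * Map a physically contiguous range into the GPU MMU domain in SZ_4K
 * steps. If one of the per-page map calls fails, the pages mapped so far
 * are unrolled again via etnaviv_domain_unmap() and the error is returned
 * to the caller.
 */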
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

	return ret;
}

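/*
 * Map every entry of a scatterlist at consecutive GPU virtual addresses
 * starting at iova. On failure the entries that were already mapped are
 * unmapped again, leaving the address range untouched.
 */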
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

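/*
 * Unmap a scatterlist that was previously mapped with etnaviv_iommu_map(),
 * walking the entries in the same order to recover the per-entry sizes.
 */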
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

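/*
 * Drop a GEM object's mapping from the MMU: unmap its scatterlist and
 * release the drm_mm node that held its GPU address range.
 */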
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

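/*
 * Find a free GPU address range of the given size. If the address space
 * is full, scan the currently unused (not pinned) mappings with the
 * drm_mm eviction helpers, reap enough of them to make room and retry the
 * allocation with DRM_MM_INSERT_EVICT. Called with mmu->lock held.
 */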
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

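/*
 * Map a GEM object into the GPU address space. On a v1 MMU a contiguous
 * buffer that fits below the 2 GiB mark can be addressed directly
 * relative to memory_base; everything else gets a drm_mm node and a real
 * MMU mapping.
 */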
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
unlock:
	mutex_unlock(&mmu->lock);

	return ret;
}

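/*
 * Remove a GEM object's mapping from the GPU address space. The mapping
 * must not be in use anymore at this point.
 */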
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	mmu->domain->ops->free(mmu->domain);
	kfree(mmu);
}

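/*
 * Allocate the per-GPU MMU context: pick the v1 or v2 page table format
 * based on the MMU_VERSION feature bit and set up the drm_mm manager
 * covering the domain's address range.
 */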
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

	return mmu;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

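/*
 * Map a suballocator region into the GPU address space. On a v1 MMU the
 * physical address is simply translated relative to memory_base; on a
 * v2 MMU an address range is allocated and mapped read-only.
 */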
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
					 size, ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

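/*
 * Release a suballocator mapping again. Only the v2 MMU holds a mapping
 * and a drm_mm node for it; the v1 path never allocated one.
 */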
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		etnaviv_domain_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	iommu->domain->ops->dump(iommu->domain, buf);
}