// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

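/*
 * Unmap a range of GPU virtual address space in 4K pages, stopping early
 * if the domain reports that a page could not be unmapped.
 */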
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

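/*
 * Map a physically contiguous range into the domain in 4K pages. On
 * failure the partially created mapping is rolled back before returning
 * the error.
 */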
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

        return ret;
}

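/*
 * Map a scatter-gather table at the given GPU virtual address, one entry
 * at a time. If any entry fails to map, every entry mapped so far is
 * unmapped again and the error is returned.
 */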
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

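/*
 * Unmap a scatter-gather table from the given GPU virtual address,
 * walking the entries in the same order they were mapped.
 */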
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

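/* Tear down an object's MMU mapping and release its address space node. */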
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

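/*
 * Find a free range of GPU virtual address space for @node. If the address
 * space is full, scan for unused (unpinned) mappings, evict them and retry
 * the allocation, switching to DRM_MM_INSERT_EVICT mode for the retry.
 */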
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip it. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent mapping_get from finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

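/*
 * Set up a GPU mapping for a GEM object. On v1 MMUs a contiguous buffer
 * that already sits in the GPU-addressable range above memory_base is used
 * directly, without creating page table entries; otherwise an address range
 * is allocated and the object's scatter-gather table is mapped read/write
 * into it.
 */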
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->need_flush = true;
unlock:
        mutex_unlock(&mmu->lock);

        return ret;
}

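/*
 * Drop a GEM object's GPU mapping. The mapping must no longer be in use;
 * if it owns an address space node, the range is unmapped and returned to
 * the allocator.
 */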
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->need_flush = true;
        mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        mmu->domain->ops->free(mmu->domain);
        kfree(mmu);
}

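/*
 * Allocate and initialise the MMU context for a GPU. The IOMMU version is
 * chosen from the GPU's feature bits, and the address space allocator is
 * set up to cover the domain's aperture.
 */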
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

        return mmu;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

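/*
 * Obtain a GPU virtual address for a suballocated buffer. On v1 MMUs the
 * offset into the linear window is returned directly; otherwise an address
 * range is allocated and the buffer is mapped read-only.
 */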
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
                                         size, ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}

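/*
 * Release a suballocated buffer's GPU virtual address. Only non-v1 MMUs
 * hold page table entries and an address space node that need to be torn
 * down.
 */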
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                etnaviv_domain_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        iommu->domain->ops->dump(iommu->domain, buf);
}