// SPDX-License-Identifier: GPL-2.0-only
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(paddr), size, dir);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__dma_flush_area(page_address(page), size);
}

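/*
 * The helpers below are used by the IOMMU mmap/get_sgtable implementations
 * further down for buffers backed by a single physically contiguous region
 * (linear-map or CMA memory) rather than a vmalloc'ed page array.
 */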
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
#endif /* CONFIG_IOMMU_DMA */

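/*
 * Check that the compile-time DMA alignment is at least as large as the CPU's
 * cache writeback granule (CTR_EL0.CWG), and set up the atomic pool used for
 * non-blocking allocations by non-coherent devices.
 */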
static int __init arm64_dma_init(void)
{
	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
		   TAINT_CPU_OUT_OF_SPEC,
		   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   ARCH_DMA_MINALIGN, cache_line_size());
	return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);

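/*
 * Everything from here to the matching #endif implements the dma_map_ops used
 * for devices that sit behind an IOMMU, layered on the generic dma-iommu code.
 */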
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

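/*
 * Allocation takes one of three paths: a physically contiguous buffer when the
 * caller cannot block, a CMA buffer for DMA_ATTR_FORCE_CONTIGUOUS, or (the
 * common case) a scattered page array remapped into a contiguous VA range.
 */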
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);
	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = dma_alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				dma_free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
					prot, __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
			memset(addr, 0, size);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (dma_in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		dma_free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

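/*
 * The mmap and get_sgtable callbacks below have to cope with the same buffer
 * flavours described in the comment above: a plain lowmem address, a
 * force-contiguous remap, or a vmalloc'ed page array.
 */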
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

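/*
 * For non-coherent devices the sync/map/unmap callbacks below bracket the IOVA
 * operations with the CPU cache maintenance required for the transfer
 * direction; coherent devices (and DMA_ATTR_SKIP_CPU_SYNC) skip this entirely.
 */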
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dev_addr != DMA_MAPPING_ERROR)
		__dma_map_area(page_address(page) + offset, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

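/*
 * These ops are installed by __iommu_setup_dma_ops() for devices whose default
 * IOMMU domain is of type IOMMU_DOMAIN_DMA; other devices retain the platform
 * (direct/SWIOTLB) DMA ops.
 */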
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = xen_dma_ops;
#endif
}