// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};
static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}
static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
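/*
 * A minimal usage sketch (not part of this file): a driver that must tear
 * a managed buffer down before detach can do so explicitly.  The dev, size,
 * vaddr and dma values are assumed to come from an earlier
 * dmam_alloc_attrs() call.
 *
 *	dmam_free_coherent(dev, size, vaddr, dma);
 *
 * In the common case no explicit free is needed; the devres machinery
 * releases the buffer when the driver detaches.
 */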
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
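/*
 * A minimal usage sketch (not compiled here): allocating a managed coherent
 * buffer from a hypothetical probe routine.  "foo_priv", foo_probe() and
 * SZ_4K are illustrative; the error paths and the remove routine need no
 * explicit free because devres releases the buffer on detach.
 *
 *	struct foo_priv {
 *		void		*desc;
 *		dma_addr_t	desc_dma;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->desc = dmam_alloc_attrs(&pdev->dev, SZ_4K,
 *					      &priv->desc_dma, GFP_KERNEL, 0);
 *		if (!priv->desc)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 */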
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
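/*
 * A minimal usage sketch (not compiled here): exporting an existing coherent
 * buffer as a single-entry scatterlist.  cpu_addr, dma_addr and size are
 * assumed to describe a buffer from a prior dma_alloc_attrs() call on the
 * same device.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable_attrs(dev, &sgt, cpu_addr, dma_addr, size, 0);
 *	if (ret)
 *		return ret;
 *	// ... hand sgt.sgl to the consumer ...
 *	sg_free_table(&sgt);
 */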
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
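/*
 * A minimal usage sketch (not compiled here): a character device mmap
 * handler mapping a previously allocated coherent buffer into user space.
 * The foo_dev structure and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 *
 * As documented above, the buffer must outlive the user space mapping.
 */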
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
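/*
 * A minimal usage sketch (not compiled here): a driver supporting both
 * 32-bit and 64-bit descriptor formats can consult the required mask to
 * skip 64-bit overhead when all RAM is below 4 GB.
 * foo_enable_64bit_descriptors() is a hypothetical helper.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
 *		foo_enable_64bit_descriptors(dev);
 */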
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
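/*
 * A minimal usage sketch (not compiled here): an unmanaged allocation must
 * be paired with a free that passes the same size and attrs, from process
 * context (note the WARN_ON above).  SZ_64K and the write-combine attribute
 * are illustrative.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_attrs(dev, SZ_64K, &ring_dma, GFP_KERNEL,
 *			       DMA_ATTR_WRITE_COMBINE);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... program ring_dma into the device, use ring from the CPU ...
 *	dma_free_attrs(dev, SZ_64K, ring, ring_dma, DMA_ATTR_WRITE_COMBINE);
 */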
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}
int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
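/*
 * A minimal usage sketch (not compiled here): typical probe-time mask
 * negotiation, trying 64-bit addressing first and falling back to 32-bit.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
 *		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
 *		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *			return -ENODEV;
 *	}
 */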
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
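/*
 * A minimal usage sketch (not compiled here): memory allocated with
 * DMA_ATTR_NON_CONSISTENT must be synced explicitly around device accesses.
 * SZ_4K is illustrative.
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, SZ_4K);			// CPU fills the buffer
 *	dma_cache_sync(dev, buf, SZ_4K, DMA_TO_DEVICE);
 *	// the device may now read the buffer safely
 */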
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
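/*
 * A minimal usage sketch (not compiled here): splitting I/O so that no
 * single mapping exceeds what the platform (e.g. swiotlb) can handle.
 *
 *	size_t max = dma_max_mapping_size(dev);
 *	size_t chunk = min_t(size_t, len, max);
 */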