// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions.  In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
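
/*
 * Illustrative override (not part of this file): an architecture whose
 * ZONE_DMA spans the low 2 GiB instead of the usual 16 MiB would set this
 * from its early setup code, e.g.:
 *
 *	zone_dma_bits = 31;	// ZONE_DMA reachable with 31 address bits
 *
 * s390, for instance, does exactly that in its arch setup code.
 */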

static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
	}
	WARN_ON_ONCE(!dev->dma_mask);
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
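
/*
 * Worked example for the computation above (illustrative): if the highest
 * RAM byte sits at physical/DMA address 0x2_3fff_ffff, fls64() returns 34
 * and the expression evaluates to (1ULL << 33) * 2 - 1 == DMA_BIT_MASK(34).
 * It is written this way rather than as (1ULL << fls64(max_dma)) - 1 so
 * that a full 64-bit max_dma does not trigger an undefined shift by 64.
 */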

static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	if (force_dma_unencrypted(dev))
		*phys_limit = __dma_to_phys(dev, dma_limit);
	else
		*phys_limit = dma_to_phys(dev, dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
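
/*
 * Example of the zone selection above (illustrative, assuming a 1:1
 * phys-to-dma translation and the default zone_dma_bits of 24): a 24-bit
 * coherent mask yields GFP_DMA, a 32-bit mask yields GFP_DMA32, and a
 * full 64-bit mask yields no zone modifier at all.
 */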

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
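
/*
 * Example (illustrative, again assuming a 1:1 translation): for a device
 * with a 32-bit coherent mask and no bus limit, a 64 KiB buffer starting
 * at physical 0xfffff000 fails the check above because its last byte at
 * 0x1_0000_efff lies beyond DMA_BIT_MASK(32).
 */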

struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_limit);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
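
/*
 * Example of the fallback cascade above (illustrative): a device with a
 * 30-bit coherent mask starts out with GFP_DMA32 (chosen by
 * __dma_direct_optimal_gfp_mask()); if the returned page lies between
 * 1 GiB and 4 GiB it fails dma_coherent_ok(), is freed again, and the
 * allocation is retried once more with GFP_DMA.
 */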

void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs) &&
	    !gfpflags_allow_blocking(gfp)) {
		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, PAGE_ALIGN(size));

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret) {
			dma_free_contiguous(dev, page, size);
			return ret;
		}

		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev))
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = uncached_kernel_address(ret);
	}
done:
	if (force_dma_unencrypted(dev))
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
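
/*
 * Typical call path (illustrative): on a system using dma-direct, a
 * driver's
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, buf, dma_handle);
 *
 * ends up in dma_direct_alloc() and dma_direct_free() above.
 */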

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length, dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
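
/*
 * Illustrative driver-side sequence exercising the sync helpers above
 * (sketch, not from this file):
 *
 *	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// ... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	// ... CPU inspects the data, device may write again ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
 */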

void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif

static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size, true);
}

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);
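
/*
 * Illustrative driver-side use of the scatterlist path above (sketch,
 * assuming an already initialized scatterlist sg[] of 4 entries):
 *
 *	int n = dma_map_sg(dev, sg, 4, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	// program the device using sg_dma_address()/sg_dma_len()
 *	dma_unmap_sg(dev, sg, 4, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original entry count, not n.
 */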

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

#ifdef CONFIG_MMU
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
#else /* CONFIG_MMU */
bool dma_direct_can_mmap(struct device *dev)
{
	return false;
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
#endif /* CONFIG_MMU */

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(zone_dma_bits);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}
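
/*
 * Example (illustrative): with CONFIG_ZONE_DMA, the default zone_dma_bits
 * of 24, and more than 16 MiB of RAM, a device advertising DMA_BIT_MASK(24)
 * or anything wider is supported, while a 16-bit mask is rejected and the
 * device has to be served by an IOMMU or fail its probe.
 */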

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}