// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/bootmem.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

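/*
 * Check that a DMA transfer of @size bytes at @dma_addr is reachable by
 * @dev, and print a diagnostic when an addressable mapping was expected
 * but is not possible.  Returns false when the address is not usable.
 */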
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
				caller, &dma_addr, size,
				*dev->dma_mask, dev->bus_dma_mask);
		}
		return false;
	}
	return true;
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
		max_dma = dev->bus_dma_mask;

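	/*
	 * Round up to a mask that has every bit up to and including the most
	 * significant bit of the highest reachable address set.
	 */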
	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

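/*
 * Check whether a candidate allocation at @phys is addressable under both
 * the coherent DMA mask and any bus limit of @dev.
 */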
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

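		/*
		 * The page we got is not addressable by the device: free it
		 * and retry the allocation from the next smaller zone.
		 */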
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

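	/*
	 * For SEV, mark the buffer unencrypted and hand out the matching
	 * unencrypted bus address; the __phys_to_dma() translation bypasses
	 * the encryption-bit fixup that phys_to_dma() would apply.
	 */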
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

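/*
 * Allocation entry points: devices that are not cache-coherent are handed
 * to the architecture's own alloc/free hooks, everything else goes through
 * the direct page allocator above.
 */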
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

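/*
 * Ownership transfer to the device for non-coherent devices: perform the
 * architecture's cache maintenance (typically a writeback) so the device
 * sees the data the CPU wrote.
 */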
static void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

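/*
 * Ownership transfer back to the CPU after the device is done with the
 * buffer; only built when the architecture provides CPU-side cache
 * maintenance hooks.
 */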
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

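/*
 * Streaming mapping of a single page: translate to a bus address, validate
 * it against the device and bus masks, and transfer ownership to the device
 * unless the caller asked to skip the CPU sync.
 */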
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
	return dma_addr;
}

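/*
 * Map a scatterlist for streaming DMA.  Returns the number of mapped
 * entries, or 0 if any entry is not addressable by the device.
 */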
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
	return nents;
}

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

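	/*
	 * Clamp to the highest physical address actually present; a device
	 * that cannot reach the zone limit is still usable if no memory lies
	 * above the addresses it can reach.
	 */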
	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	return mask >= phys_to_dma(dev, min_mask);
}

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.unmap_page		= dma_direct_unmap_page,
	.unmap_sg		= dma_direct_unmap_sg,
#endif
	.get_required_mask	= dma_direct_get_required_mask,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);