kernel/dma/direct.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/bootmem.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR            0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
        return sev_active();
}

/*
 * Check that a DMA address produced by the direct mapping is usable by the
 * device, i.e. covered by its dma_mask (and bus_dma_mask, if one is set).
 */
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
                const char *caller)
{
        if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
                if (!dev->dma_mask) {
                        dev_err(dev,
                                "%s: call on device without dma_mask\n",
                                caller);
                        return false;
                }

                if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
                        dev_err(dev,
                                "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
                                caller, &dma_addr, size,
                                *dev->dma_mask, dev->bus_dma_mask);
                }
                return false;
        }
        return true;
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted())
                return __phys_to_dma(dev, phys);
        return phys_to_dma(dev, phys);
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

        if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
                max_dma = dev->bus_dma_mask;

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

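/*
 * Worked example for the rounding in dma_direct_get_required_mask() above
 * (the address is illustrative): with a highest reachable DMA address of
 * 0x123456789, fls64() reports bit 33, so the function returns
 * (1ULL << 32) * 2 - 1 = 0x1ffffffff, i.e. a mask with every bit up to and
 * including the most significant bit of that address set.
 */
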
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_mask)
{
        if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
                dma_mask = dev->bus_dma_mask;

        if (force_dma_unencrypted())
                *phys_mask = __dma_to_phys(dev, dma_mask);
        else
                *phys_mask = dma_to_phys(dev, dma_mask);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
         * zones.
         */
        if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                return GFP_DMA;
        if (*phys_mask <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

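/*
 * Example of the zone selection above (the 30-bit mask is illustrative,
 * assuming a 1:1 DMA translation): a device with a 30-bit coherent DMA mask
 * yields a physical mask just under 1 GiB, above the default 24-bit
 * ARCH_ZONE_DMA_BITS limit but within 32 bits, so GFP_DMA32 is tried first.
 * If dma_coherent_ok() then rejects the pages that came back,
 * dma_direct_alloc_pages() below retries with GFP_DMA.
 */
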
/*
 * Is the whole allocation, including its last byte, reachable within the
 * device's coherent DMA mask (and bus_dma_mask, if set)?
 */
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
        u64 phys_mask;
        void *ret;

        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        /* we always manually zero the memory once we are done: */
        gfp &= ~__GFP_ZERO;
        gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
again:
        /* CMA can be used only in a context that permits sleeping */
        if (gfpflags_allow_blocking(gfp)) {
                page = dma_alloc_from_contiguous(dev, count, page_order,
                                                 gfp & __GFP_NOWARN);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, page_order);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_mask < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        if (!page)
                return NULL;
        ret = page_address(page);
        if (force_dma_unencrypted()) {
                set_memory_decrypted((unsigned long)ret, 1 << page_order);
                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
        } else {
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
        return ret;
}

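/*
 * Sketch of a typical consumer of the allocation path above (not built as
 * part of this file): a driver asks the generic DMA API for a coherent
 * buffer, and dma_alloc_coherent() dispatches to dma_direct_alloc() for
 * devices using dma_direct_ops.  The helpers below are illustrative only;
 * the buffer size is an arbitrary example and the declarations come from
 * <linux/dma-mapping.h>.
 */
#if 0
static void *example_get_coherent_buffer(struct device *dev,
                dma_addr_t *dma_handle)
{
        /* Ends up in dma_direct_alloc_pages() on cache-coherent devices. */
        return dma_alloc_coherent(dev, PAGE_SIZE, dma_handle, GFP_KERNEL);
}

static void example_put_coherent_buffer(struct device *dev, void *cpu_addr,
                dma_addr_t dma_handle)
{
        /* Mirrors the allocation; ends up in dma_direct_free_pages(). */
        dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
}
#endif
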
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int page_order = get_order(size);

        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
                free_pages((unsigned long)cpu_addr, page_order);
}

void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

static void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        if (!check_addr(dev, dma_addr, size, __func__))
                return DIRECT_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
        return dma_addr;
}

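/*
 * Sketch of the streaming path above as a driver sees it through the generic
 * DMA API (not built as part of this file): dma_map_single() resolves the
 * buffer to a page and offset and lands in dma_direct_map_page(); the
 * matching unmap lands in dma_direct_unmap_page() above.  The helper and
 * buffer below are illustrative only, with declarations from
 * <linux/dma-mapping.h>.
 */
#if 0
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))   /* i.e. addr == DIRECT_MAPPING_ERROR */
                return -ENOMEM;

        /* ... program the device with "addr" and wait for completion ... */

        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
        return 0;
}
#endif
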
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
                if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
                        return 0;
                sg_dma_len(sg) = sg->length;
        }

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
        return nents;
}

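/*
 * Sketch of scatterlist mapping from the driver side (not built as part of
 * this file): dma_map_sg() calls into dma_direct_map_sg() above and fills in
 * sg_dma_address()/sg_dma_len() for each entry; a return value of 0 means
 * the mapping failed.  The two-entry list below is illustrative only, with
 * declarations from <linux/dma-mapping.h> and <linux/scatterlist.h>.
 */
#if 0
static int example_map_two_buffers(struct device *dev, void *a, void *b,
                size_t len)
{
        struct scatterlist sgl[2];
        struct scatterlist *sg;
        int mapped, i;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], a, len);
        sg_set_buf(&sgl[1], b, len);

        mapped = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
        if (!mapped)
                return -ENOMEM;

        for_each_sg(sgl, sg, mapped, i) {
                /* hand sg_dma_address(sg) / sg_dma_len(sg) to the device */
        }

        dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
        return 0;
}
#endif
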
/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask;

        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
        else
                min_mask = DMA_BIT_MASK(32);

        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

        return mask >= phys_to_dma(dev, min_mask);
}

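/*
 * Worked example for dma_direct_supported() (values are illustrative,
 * assuming a 1:1 DMA translation): with CONFIG_ZONE_DMA and the default
 * ARCH_ZONE_DMA_BITS of 24, min_mask starts at 0xffffff (16 MiB - 1), so a
 * device advertising a 32-bit mask is accepted, while a device whose mask
 * cannot even cover ZONE_DMA, say DMA_BIT_MASK(20), is rejected.
 */
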
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc,
        .free                   = dma_direct_free,
        .map_page               = dma_direct_map_page,
        .map_sg                 = dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_device     = dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
        .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
        .unmap_page             = dma_direct_unmap_page,
        .unmap_sg               = dma_direct_unmap_sg,
#endif
        .get_required_mask      = dma_direct_get_required_mask,
        .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
        .cache_sync             = arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);
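
/*
 * Sketch of how a device ends up on these ops (not built as part of this
 * file): the generic DMA API uses the per-device ops when set and otherwise
 * falls back to the architecture default, which many architectures point at
 * dma_direct_ops.  The probe function and platform device below are
 * hypothetical; set_dma_ops() comes from <linux/dma-mapping.h> and
 * struct platform_device from <linux/platform_device.h>.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
        /* Explicitly route this device through the direct mapping. */
        set_dma_ops(&pdev->dev, &dma_direct_ops);
        return 0;
}
#endif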