// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
				    dma_addr_t device_addr, size_t size,
				    struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
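
/*
 * Usage sketch (illustrative only, not part of the upstream file): a platform
 * driver that owns a device-local memory window could hand it to the DMA core
 * from probe. foo_probe() and the addresses below are assumptions made for
 * the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		// 1 MiB window; here the bus address seen by the device is
 *		// assumed to equal the CPU physical address.
 *		ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *						  0x90000000, SZ_1M);
 *		if (ret)
 *			return ret;
 *
 *		// dma_alloc_coherent(&pdev->dev, ...) is now satisfied from
 *		// this pool; undo with dma_release_declared_memory() on remove.
 *		return 0;
 *	}
 */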

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
		ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	return 1;
}
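
/*
 * Call-site sketch (illustrative): an architecture's allocator is expected to
 * consult the per-device pool first and fall back to its generic path only
 * when this helper returns 0. arch_dma_alloc() is a stand-in name, not a
 * reference to a particular implementation.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		// A return value of 1 means "answer is final", even when
 *		// ret is NULL because the pool was exhausted.
 *		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
 *			return ret;
 *
 *		// ... otherwise allocate from the generic page allocator ...
 *	}
 */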

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
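
/*
 * Release-side sketch (illustrative): the free path mirrors allocation,
 * converting the buffer size back to an allocation order. arch_dma_free()
 * is a stand-in name.
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 *			   dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_dev_coherent(dev, get_order(size),
 *						  cpu_addr))
 *			return;	// buffer belonged to the per-device pool
 *
 *		// ... otherwise free through the generic path ...
 *	}
 */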

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
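
/*
 * Call-site sketch (illustrative): mmap implementations likewise try the
 * per-device pool first; when this helper returns 1, @ret already carries
 * the remap_pfn_range() result. arch_dma_mmap() is a stand-in name.
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		// ... otherwise build the mapping from the generic buffer ...
 *	}
 */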

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating an error from
	 * dma_assign_coherent_memory() for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
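
/*
 * Device-tree sketch (illustrative): rmem_dma_setup() above is bound to
 * reserved-memory nodes compatible with "shared-dma-pool". The node name,
 * label, and addresses are assumptions for the example; see the
 * reserved-memory binding documentation for the authoritative format.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@90000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x90000000 0x100000>;	// 1 MiB at 0x90000000
 *			no-map;		// required on some architectures
 *			// linux,dma-default;	// optional: use as the
 *						// default coherent area
 *		};
 *	};
 */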