// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

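/*
 * Reserve a range of vmalloc address space and map the given pages into
 * it with the requested protection.  Returns NULL if either the area
 * reservation or the page table update fails.
 */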
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	struct page **pages;
	struct vm_struct *area;
	int i;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}

#ifdef CONFIG_DMA_DIRECT_REMAP
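/*
 * Remapping relies on the vmalloc machinery, which may sleep, so it cannot
 * be done for non-blocking (atomic) allocations.  Instead, a chunk of
 * memory is allocated and remapped once at boot, and atomic coherent
 * allocations are then carved out of that pool.
 */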
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

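/* The 256 KiB default can be overridden at boot, e.g. with "coherent_pool=2M". */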
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

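/*
 * Set up the atomic pool at boot: allocate a physically contiguous block
 * (from CMA when a global area is available), flush any dirty cache lines
 * on the kernel alias, remap the block with the caller-supplied pgprot,
 * and hand the new virtual range to a genalloc pool.
 */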
int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(gfp, pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
					   prot, __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

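/* Check whether a kernel virtual address range lies within the atomic pool. */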
bool dma_in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

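/*
 * Carve a zeroed chunk out of the atomic pool, also returning the backing
 * struct page so the caller can compute the DMA address.
 */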
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

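/*
 * Give a chunk back to the atomic pool, or return false if the range did
 * not come from it and must be freed some other way.
 */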
bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}

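/*
 * Three allocation paths: non-blocking requests are served from the
 * pre-remapped atomic pool, DMA_ATTR_NO_KERNEL_MAPPING requests return the
 * struct page as an opaque cookie, and everything else gets fresh pages
 * remapped with the architecture's coherent pgprot.
 */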
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags) &&
	    !(attrs & DMA_ATTR_NO_KERNEL_MAPPING)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		ret = page; /* opaque cookie */
		goto done;
	}

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret) {
		__dma_direct_free_pages(dev, size, page);
		return ret;
	}

	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}

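/*
 * Undo arch_dma_alloc(): each of its three allocation paths has a matching
 * release path here, chosen by the attrs and a pool membership check.
 */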
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		/* vaddr is a struct page cookie, not a kernel address */
		__dma_direct_free_pages(dev, size, vaddr);
	} else if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		phys_addr_t phys = dma_to_phys(dev, dma_handle);
		struct page *page = pfn_to_page(__phys_to_pfn(phys));

		vunmap(vaddr);
		__dma_direct_free_pages(dev, size, page);
	}
}

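/*
 * Used by the common mmap and get_sgtable helpers: the remapped cpu_addr is
 * a vmalloc address, so virt_to_page() does not work on it; derive the pfn
 * from the DMA address instead.
 */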
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
#endif /* CONFIG_DMA_DIRECT_REMAP */