// SPDX-License-Identifier: GPL-2.0-only
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};
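/* Bootstrap region list head covering the entire consistent window. */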
static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
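/*
 * First-fit allocator for the consistent window: scan the sorted region
 * list for a gap large enough, then link the new region in place.
 */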
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
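/*
 * Look up a region by its start address; called with consistent_lock held.
 */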
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (!mask) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
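	/*
	 * For a mask of 2^n - 1 this yields 2^n, the smallest size the
	 * mask cannot address; an all-ones 64-bit mask yields 0, which
	 * disables the limit check below.
	 */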
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);
	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;
	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}
	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*dma_handle = phys_to_dma(dev, page_to_phys(page));

		do {
			SetPageReserved(page);
			map_kernel_page(vaddr, page_to_phys(page),
					pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
/*
 * free a page as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;
	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}
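
	/*
	 * Walk the kernel page tables over the region, clearing each
	 * uncached PTE and releasing the page that backed it.
	 */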
	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);
	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
/*
 * make an area consistent.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
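	/*
	 * One segment covers the first (possibly partial) page; the
	 * remainder is divided into whole pages, rounding the tail up.
	 */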
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, dir);
#endif
}
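/*
 * Streaming DMA maintenance hooks: __dma_sync() keys the writeback vs.
 * invalidate choice off the direction, so device and cpu syncs share it.
 */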
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
/*
 * Return the PFN for a given cpu virtual address returned by arch_dma_alloc.
 */
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
		dma_addr_t dma_addr)
{
	/* This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG_ON()
	 */
	unsigned long cpu_addr = (unsigned long)vaddr;
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return -1;
	return pte_pfn(*ptep);
}