/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
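
/*
 * Platform code typically establishes that offset at probe time, e.g. with
 * something like set_dma_offset(dev, offset) (helper assumed here from
 * <asm/dma-mapping.h>); this file only reads it back via get_dma_offset().
 */

/*
 * Return one past the highest page frame number the device can reach
 * through its coherent DMA mask, further capped by the swiotlb direct
 * mapping limit when the device uses powerpc_swiotlb_dma_ops.
 */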
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}
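
/*
 * The direct ops can be used when the mask covers all of system memory
 * once the per-device offset is applied (Freescale parts get a ZONE_DMA
 * fallback, see below).
 */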
int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

        /* Limit fits in the mask, we are good */
        if (mask >= limit)
                return 1;

#ifdef CONFIG_FSL_SOC
        /*
         * Freescale gets another chance via ZONE_DMA, however
         * that will have to be refined if/when they support iommus
         */
        return 1;
#endif
        /* Sorry ... */
        return 0;
#else
        return 1;
#endif
}

#ifndef CONFIG_NOT_COHERENT_CACHE
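/*
 * On cache-coherent platforms a "coherent" buffer is just zeroed pages
 * from the device's NUMA node; Freescale parts additionally steer the
 * allocation into a zone the device can actually reach.
 */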
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flag,
                                  unsigned long attrs)
{
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
        u64 pfn = get_pfn_limit(dev);
        int zone;

        /*
         * This code should be OK on other platforms, but we have drivers that
         * don't set coherent_dma_mask. As a workaround we just ifdef it. This
         * whole routine needs some serious cleanup.
         */

        zone = dma_pfn_limit_to_zone(pfn);
        if (zone < 0) {
                dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
                        __func__, pfn);
                return NULL;
        }

        switch (zone) {
#ifdef CONFIG_ZONE_DMA
        case ZONE_DMA:
                flag |= GFP_DMA;
                break;
#endif
        };
#endif /* CONFIG_FSL_SOC */

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = __pa(ret) + get_dma_offset(dev);

        return ret;
}
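
/*
 * Counterpart of __dma_nommu_alloc_coherent(): simply hand the pages back
 * to the page allocator.
 */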
void __dma_nommu_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
        free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* !CONFIG_NOT_COHERENT_CACHE */
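
/*
 * Allocation entry point for the ops table: use the direct allocator when
 * the coherent mask allows it, otherwise fall back to the device's iommu.
 */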
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       unsigned long attrs)
{
        struct iommu_table *iommu;

        /* The coherent mask may be smaller than the real mask, check if
         * we can really use the direct ops
         */
        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_nommu_alloc_coherent(dev, size, dma_handle,
                                                   flag, attrs);

        /* Ok we can't ... do we have an iommu ? If not, fail */
        iommu = get_iommu_table_base(dev);
        if (iommu == NULL)
                return NULL;

        /* Try to use the iommu */
        return iommu_alloc_coherent(dev, iommu, size, dma_handle,
                                    dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_nommu_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     unsigned long attrs)
{
        struct iommu_table *iommu;

        /* See comments in dma_nommu_alloc_coherent() */
        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
                                                  attrs);
        /* Maybe we used an iommu ... */
        iommu = get_iommu_table_base(dev);

        /* If we hit that we should have never allocated in the first
         * place so how come we are freeing ?
         */
        if (WARN_ON(!iommu))
                return;
        iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
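
/*
 * mmap() support for coherent buffers: on non-coherent platforms the user
 * mapping must be uncached and the pfn looked up from the coherent pool,
 * otherwise the buffer is ordinary lowmem.
 */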
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
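
/*
 * Map a scatterlist by translating each segment's physical address with
 * the bus offset; cache maintenance is skipped when the caller passes
 * DMA_ATTR_SKIP_CPU_SYNC.
 */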
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                     int nents, enum dma_data_direction direction,
                     unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}
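
/*
 * Unmapping only needs to perform cache maintenance; there is no mapping
 * state to undo for the direct ops.
 */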
static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
                                int nents, enum dma_data_direction direction,
                                unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
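
/*
 * Report the required DMA mask: every bit up to and including the most
 * significant bit of the highest bus address of system memory.
 */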
u64 dma_nommu_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}
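
/*
 * Map a single page for streaming DMA: flush the CPU caches unless the
 * caller asked to skip the sync, then return the bus address (physical
 * address plus the per-device offset).
 */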
dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync_page(page, offset, size, dir);

        return page_to_phys(page) + offset + get_dma_offset(dev);
}
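
/*
 * Tear down a single streaming mapping; only cache maintenance is needed
 * since there is no iommu state to release.
 */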
static inline void dma_nommu_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(bus_to_virt(dma_address), size, direction);
}

#ifdef CONFIG_NOT_COHERENT_CACHE
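/*
 * On non-cache-coherent platforms the streaming sync callbacks must flush
 * or invalidate the CPU caches around each DMA transfer; coherent platforms
 * compile them out entirely (see the ops table below).
 */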
static inline void dma_nommu_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
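
/*
 * Sync a single streaming mapping; the bus address is converted back to a
 * kernel virtual address with bus_to_virt() before the cache operation.
 */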
static inline void dma_nommu_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif /* CONFIG_NOT_COHERENT_CACHE */
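
/*
 * The "nommu" (direct) DMA operations, used when no iommu sits between the
 * device and memory. The cache sync hooks are only wired up on
 * non-coherent platforms.
 */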
const struct dma_map_ops dma_nommu_ops = {
        .alloc = dma_nommu_alloc_coherent,
        .free = dma_nommu_free_coherent,
        .mmap = dma_nommu_mmap_coherent,
        .map_sg = dma_nommu_map_sg,
        .unmap_sg = dma_nommu_unmap_sg,
        .dma_supported = dma_nommu_dma_supported,
        .map_page = dma_nommu_map_page,
        .unmap_page = dma_nommu_unmap_page,
        .get_required_mask = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu = dma_nommu_sync_single,
        .sync_single_for_device = dma_nommu_sync_single,
        .sync_sg_for_cpu = dma_nommu_sync_sg,
        .sync_sg_for_device = dma_nommu_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_nommu_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask)) {
                /*
                 * We need to special case the direct DMA ops which can
                 * support a fallback for coherent allocations. There
                 * is no dma_op->set_coherent_mask() so we have to do
                 * things the hard way:
                 */
                if (get_dma_ops(dev) != &dma_nommu_ops ||
                    get_iommu_table_base(dev) == NULL ||
                    !dma_iommu_dma_supported(dev, mask))
                        return -EIO;
        }
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
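
/*
 * dma_set_mask() lets the platform (ppc_md) and then the PCI controller
 * override the generic behaviour. A driver would typically call something
 * like dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) and retry with a
 * narrower mask if this returns an error.
 */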
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);
                if (phb->controller_ops.dma_set_mask)
                        return phb->controller_ops.dma_set_mask(pdev, dma_mask);
        }

        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
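
/*
 * Register the VIO bus with dma-debug at boot so DMA API misuse on vio
 * devices can be tracked when DMA API debugging is enabled.
 */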
static int __init dma_init(void)
{
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);