// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */
/*
 * The coherent mask may be smaller than the real mask, check if we can
 * really use a direct window.
 */
static inline bool dma_iommu_alloc_bypass(struct device *dev)
{
	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
		dma_direct_supported(dev, dev->coherent_dma_mask);
}
static inline bool dma_iommu_map_bypass(struct device *dev,
		unsigned long attrs)
{
	return dev->archdata.iommu_bypass &&
		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
}
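/*
 * Illustration (not from the original file, hypothetical driver): a
 * device may carry a 64-bit streaming mask but only a 32-bit coherent
 * mask, e.g.:
 *
 *	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 *	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 *
 * For such a device dma_iommu_map_bypass() can take the direct window,
 * while dma_iommu_alloc_bypass() may fail the dma_direct_supported()
 * check, keeping coherent allocations behind the iommu.
 */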
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		return __dma_nommu_alloc_coherent(dev, size, dma_handle, flag,
				attrs);
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}
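/*
 * Example call path (hedged sketch): a consumer reaches the allocator
 * above through the generic DMA API, e.g. in a hypothetical driver:
 *
 *	dma_addr_t handle;
 *	void *va = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 * With dma_iommu_ops installed on @dev, that call lands in
 * dma_iommu_alloc_coherent().
 */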
static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		__dma_nommu_free_coherent(dev, size, vaddr, dma_handle, attrs);
	else
		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
				dma_handle);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page.  The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_nommu_map_page(dev, page, offset, size, direction,
				attrs);
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}
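/*
 * Example call path (hedged sketch): a streaming mapping made by a
 * hypothetical driver,
 *
 *	dma_addr_t busaddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					  DMA_TO_DEVICE);
 *
 * arrives here and either bypasses translation or allocates TCEs from
 * the device's iommu_table.
 */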
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
				direction, attrs);
}
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_nommu_map_sg(dev, sglist, nelems, direction, attrs);
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				device_to_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
				direction, attrs);
}
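/*
 * Example (hedged sketch): scatter/gather requests enter through the
 * generic API; a hypothetical driver would do:
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_FROM_DEVICE);
 *	if (nents == 0)
 *		return -ENOMEM;	/* mapping failed, e.g. no TCE space */
 *
 * A zero return from dma_map_sg() signals failure to the caller.
 */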
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	return phb->controller_ops.iommu_bypass_supported &&
		phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
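/*
 * Platform hook sketch (hypothetical names): a PHB that can provide a
 * direct window advertises the capability through its controller ops,
 * roughly:
 *
 *	static const struct pci_controller_ops example_phb_ops = {
 *		.iommu_bypass_supported = example_iommu_bypass_supported,
 *	};
 *
 * where example_iommu_bypass_supported() would return true when @mask
 * covers the direct-mapped window.
 */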
/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		dev->archdata.iommu_bypass = true;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
				mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->archdata.iommu_bypass = false;
	return 1;
}
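/*
 * Worked example for the offset check above (illustrative numbers):
 * with a 32-bit mask (0xffffffff) and it_page_shift == 12, it_offset
 * must not exceed mask >> 12 == 0xfffff pages, i.e. the DMA window
 * must start below 4GB of bus address space.
 */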
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

	if (dev_is_pci(dev)) {
		u64 bypass_mask = dma_direct_get_required_mask(dev);

		if (dma_iommu_bypass_supported(dev, bypass_mask))
			return bypass_mask;
	}

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}
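/*
 * Worked example for the computation above (illustrative numbers):
 * with it_offset + it_size == 0x10000, fls_long() returns 17, so
 * mask = 1ULL << 16 = 0x10000, and mask += mask - 1 yields
 * 0x10000 + 0xffff = 0x1ffff, the all-ones mask covering the window.
 */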
const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
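/*
 * Usage sketch (not from the original file): platform code that owns a
 * device's TCE table would typically wire these ops up with the real
 * helpers set_iommu_table_base() and set_dma_ops(); the call site shown
 * is hypothetical:
 *
 *	set_iommu_table_base(dev, tbl);
 *	set_dma_ops(dev, &dma_iommu_ops);
 */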