/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_MALI_LPAE,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};
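
/*
 * Example: a minimal sketch of a driver-side implementation, assuming a
 * hypothetical driver with "my_iommu_*" hardware invalidation helpers and a
 * "struct my_iommu_domain" (none of these are part of this API). The cookie
 * is whatever the driver passed to alloc_io_pgtable_ops(), typically its
 * domain structure. All three callbacks may run in atomic context, so the
 * sync routine should poll rather than sleep.
 *
 *	static void my_iommu_tlb_flush_all(void *cookie)
 *	{
 *		struct my_iommu_domain *dom = cookie;
 *
 *		my_iommu_invalidate_context(dom);
 *	}
 *
 *	static void my_iommu_tlb_add_flush(unsigned long iova, size_t size,
 *					   size_t granule, bool leaf,
 *					   void *cookie)
 *	{
 *		struct my_iommu_domain *dom = cookie;
 *
 *		my_iommu_queue_invalidate_range(dom, iova, size, granule, leaf);
 *	}
 *
 *	static void my_iommu_tlb_sync(void *cookie)
 *	{
 *		struct my_iommu_domain *dom = cookie;
 *
 *		my_iommu_poll_invalidation_complete(dom);
 *	}
 *
 *	static const struct iommu_gather_ops my_iommu_gather_ops = {
 *		.tlb_flush_all	= my_iommu_tlb_flush_all,
 *		.tlb_add_flush	= my_iommu_tlb_add_flush,
 *		.tlb_sync	= my_iommu_tlb_sync,
 *	};
 */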

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 *
	 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
	 *	on unmap, for DMA domains using the flush queue mechanism for
	 *	delayed invalidation.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	bool				coherent_walk;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
};
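
/*
 * Example: a rough sketch of how a driver might fill in a configuration
 * before calling alloc_io_pgtable_ops(). The page sizes, the 32-bit input
 * and 40-bit output address sizes, "dev" and "my_iommu_gather_ops" (from
 * the sketch above) are illustrative values only, not requirements of the
 * API.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= IO_PGTABLE_QUIRK_NON_STRICT,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 32,
 *		.oas		= 40,
 *		.coherent_walk	= true,
 *		.tlb		= &my_iommu_gather_ops,
 *		.iommu_dev	= dev,
 *	};
 */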

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
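
/*
 * Example: a sketch of a driver's iommu_ops paths calling into these ops,
 * assuming a hypothetical domain structure that stores the pointer returned
 * by alloc_io_pgtable_ops() as "pgtbl_ops" and a "to_my_domain()" container
 * helper (both illustrative, not part of this API):
 *
 *	static int my_iommu_map(struct iommu_domain *domain, unsigned long iova,
 *				phys_addr_t paddr, size_t size, int prot)
 *	{
 *		struct my_iommu_domain *dom = to_my_domain(domain);
 *
 *		return dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr,
 *					   size, prot);
 *	}
 *
 *	static size_t my_iommu_unmap(struct iommu_domain *domain,
 *				     unsigned long iova, size_t size)
 *	{
 *		struct my_iommu_domain *dom = to_my_domain(domain);
 *
 *		return dom->pgtbl_ops->unmap(dom->pgtbl_ops, iova, size);
 *	}
 */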

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
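
/*
 * Example: a sketch of the allocation/teardown lifecycle, reusing the "cfg"
 * and "dom" placeholders from the sketches above (the ARM_64_LPAE_S1 format
 * is just one possible choice):
 *
 *	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
 *	if (!dom->pgtbl_ops)
 *		return -ENOMEM;
 *
 *	// cfg may have been adjusted, e.g. cfg.pgsize_bitmap restricted
 *	dom->domain.pgsize_bitmap = cfg.pgsize_bitmap;
 *
 *	...
 *
 *	// on domain destruction, once no mappings remain live:
 *	free_io_pgtable_ops(dom->pgtbl_ops);
 */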

/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_sync(iop->cookie);
}
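
/*
 * Example: a sketch of how a format implementation typically uses the
 * helpers above from inside its ops. "my_fmt_clear_pte()" stands in for the
 * real PTE manipulation, and passing "size" as the granule is a
 * simplification; both are assumptions of this sketch.
 *
 *	static size_t my_fmt_unmap(struct io_pgtable_ops *ops,
 *				   unsigned long iova, size_t size)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *		my_fmt_clear_pte(iop, iova, size);
 *		io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 *		io_pgtable_tlb_sync(iop);
 *
 *		return size;
 *	}
 */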

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};
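
/*
 * Example: a sketch of what a format implementation exports, using a
 * hypothetical "my_fmt" format (the real instances are declared below);
 * "struct my_fmt_pgtable" embedding the io_pgtable and the my_fmt_map/
 * my_fmt_unmap/my_fmt_iova_to_phys/my_fmt_free helpers are assumptions:
 *
 *	static struct io_pgtable *my_fmt_alloc(struct io_pgtable_cfg *cfg,
 *					       void *cookie)
 *	{
 *		struct my_fmt_pgtable *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return NULL;
 *
 *		data->iop.ops = (struct io_pgtable_ops) {
 *			.map		= my_fmt_map,
 *			.unmap		= my_fmt_unmap,
 *			.iova_to_phys	= my_fmt_iova_to_phys,
 *		};
 *
 *		return &data->iop;
 *	}
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc,
 *		.free	= my_fmt_free,
 *	};
 */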

extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;

#endif /* __IO_PGTABLE_H */