#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

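/*
 * Example (illustrative sketch only, not part of this header): an IOMMU
 * driver might provide its TLB callbacks roughly as below. The
 * my_iommu_*() helpers and struct my_iommu_domain are hypothetical.
 *
 *	static void my_tlb_flush_all(void *cookie)
 *	{
 *		struct my_iommu_domain *dom = cookie;
 *
 *		my_iommu_invalidate_all(dom);
 *	}
 *
 *	static void my_tlb_add_flush(unsigned long iova, size_t size,
 *				     size_t granule, bool leaf, void *cookie)
 *	{
 *		struct my_iommu_domain *dom = cookie;
 *
 *		my_iommu_queue_invalidate(dom, iova, size, granule, leaf);
 *	}
 *
 *	static void my_tlb_sync(void *cookie)
 *	{
 *		my_iommu_wait_for_invalidate(cookie);
 *	}
 *
 *	static const struct iommu_gather_ops my_gather_ops = {
 *		.tlb_flush_all	= my_tlb_flush_all,
 *		.tlb_add_flush	= my_tlb_add_flush,
 *		.tlb_sync	= my_tlb_sync,
 *	};
 */
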
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};

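/*
 * Example (illustrative sketch): how an IOMMU driver might fill in a
 * configuration before allocating a set of tables. The values and the
 * my_gather_ops/my_dev names are assumptions made for illustration only.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_gather_ops,
 *		.iommu_dev	= my_dev,
 *	};
 */
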
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

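/*
 * Example (illustrative): once alloc_io_pgtable_ops() below has returned a
 * set of ops, the driver calls back through them. The iova/paddr values are
 * arbitrary; sizes should correspond to entries in the negotiated
 * cfg.pgsize_bitmap.
 *
 *	if (ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE))
 *		goto err_map;
 *
 *	phys_addr_t phys = ops->iova_to_phys(ops, iova);
 *
 *	ops->unmap(ops, iova, SZ_4K);
 */
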
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);

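/*
 * Example (illustrative sketch): typical lifecycle from an IOMMU driver's
 * point of view. The cfg and domain variables are hypothetical driver state;
 * ARM_64_LPAE_S1 is just one possible format choice.
 *
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, domain);
 *	if (!ops)
 *		return -ENOMEM;
 *
 *	... map and unmap as required ...
 *
 *	(once nothing can issue new walks against the tables; a dirty TLB
 *	 is acceptable at this point, as documented above)
 *	free_io_pgtable_ops(ops);
 */
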
/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

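/*
 * Example (illustrative): a page table format implementation is handed the
 * embedded ops pointer and uses this macro to recover its io_pgtable. The
 * my_fmt_map() function is hypothetical.
 *
 *	static int my_fmt_map(struct io_pgtable_ops *ops, unsigned long iova,
 *			      phys_addr_t paddr, size_t size, int prot)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *		... install entries using the parameters in iop->cfg ...
 *	}
 */
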
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (iop->tlb_sync_pending) {
		iop->cfg.tlb->tlb_sync(iop->cookie);
		iop->tlb_sync_pending = false;
	}
}

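/*
 * Example (illustrative): format implementations should issue TLB
 * maintenance through the helpers above rather than via cfg.tlb directly,
 * so that redundant syncs can be skipped. my_fmt_unmap() is hypothetical;
 * a 4K granule is assumed purely for the example.
 *
 *	static int my_fmt_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 *				size_t size)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *		... clear the entries covering [iova, iova + size) ...
 *
 *		io_pgtable_tlb_add_flush(iop, iova, size, SZ_4K, true);
 *		io_pgtable_tlb_sync(iop);
 *		return size;
 *	}
 */
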
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

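/*
 * Example (illustrative sketch): a new page table format would provide a
 * pair of init functions, conventionally embedding struct io_pgtable in its
 * own private data. All my_fmt_* names are hypothetical.
 *
 *	struct my_fmt_pgtable {
 *		struct io_pgtable	iop;
 *		... format-private state ...
 *	};
 *
 *	static struct io_pgtable *my_fmt_alloc(struct io_pgtable_cfg *cfg,
 *					       void *cookie)
 *	{
 *		struct my_fmt_pgtable *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return NULL;
 *		data->iop.ops = (struct io_pgtable_ops) {
 *			.map		= my_fmt_map,
 *			.unmap		= my_fmt_unmap,
 *			.iova_to_phys	= my_fmt_iova_to_phys,
 *		};
 *		return &data->iop;
 *	}
 *
 *	static void my_fmt_free(struct io_pgtable *iop)
 *	{
 *		kfree(container_of(iop, struct my_fmt_pgtable, iop));
 *	}
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc,
 *		.free	= my_fmt_free,
 *	};
 */
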
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

#endif /* __IO_PGTABLE_H */