/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL_SHIFT 27
#define IDR0_ST_LVL_MASK 0x3
#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT 24
#define IDR0_STALL_MODEL_MASK 0x3
#define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT 21
#define IDR0_TTENDIAN_MASK 0x3
#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L (1 << 19)
#define IDR0_VMID16 (1 << 18)
#define IDR0_PRI (1 << 16)
#define IDR0_SEV (1 << 14)
#define IDR0_MSI (1 << 13)
#define IDR0_ASID16 (1 << 12)
#define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9)
#define IDR0_COHACC (1 << 4)
#define IDR0_TTF_SHIFT 2
#define IDR0_TTF_MASK 0x3
#define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT)
#define IDR0_S1P (1 << 1)
#define IDR0_S2P (1 << 0)

#define ARM_SMMU_IDR1 0x4
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
#define IDR1_CMDQ_SHIFT 21
#define IDR1_CMDQ_MASK 0x1f
#define IDR1_EVTQ_SHIFT 16
#define IDR1_EVTQ_MASK 0x1f
#define IDR1_PRIQ_SHIFT 11
#define IDR1_PRIQ_MASK 0x1f
#define IDR1_SSID_SHIFT 6
#define IDR1_SSID_MASK 0x1f
#define IDR1_SID_SHIFT 0
#define IDR1_SID_MASK 0x3f

#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX_SHIFT 16
#define IDR5_STALL_MAX_MASK 0xffff
#define IDR5_GRAN64K (1 << 6)
#define IDR5_GRAN16K (1 << 5)
#define IDR5_GRAN4K (1 << 4)
#define IDR5_OAS_SHIFT 0
#define IDR5_OAS_MASK 0x7
#define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0 0x20
#define CR0_CMDQEN (1 << 3)
#define CR0_EVTQEN (1 << 2)
#define CR0_PRIQEN (1 << 1)
#define CR0_SMMUEN (1 << 0)

#define ARM_SMMU_CR0ACK 0x24

#define ARM_SMMU_CR1 0x28
#define CR1_SH_NSH 0
#define CR1_SH_OSH 2
#define CR1_SH_ISH 3
#define CR1_CACHE_NC 0
#define CR1_CACHE_WB 1
#define CR1_CACHE_WT 2
#define CR1_TABLE_SH_SHIFT 10
#define CR1_TABLE_OC_SHIFT 8
#define CR1_TABLE_IC_SHIFT 6
#define CR1_QUEUE_SH_SHIFT 4
#define CR1_QUEUE_OC_SHIFT 2
#define CR1_QUEUE_IC_SHIFT 0

#define ARM_SMMU_CR2 0x2c
#define CR2_PTM (1 << 2)
#define CR2_RECINVSID (1 << 1)
#define CR2_E2H (1 << 0)

#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
#define IRQ_CTRL_GERROR_IRQEN (1 << 0)

#define ARM_SMMU_IRQ_CTRLACK 0x54

#define ARM_SMMU_GERROR 0x60
#define GERROR_SFM_ERR (1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
#define GERROR_PRIQ_ABT_ERR (1 << 3)
#define GERROR_EVTQ_ABT_ERR (1 << 2)
#define GERROR_CMDQ_ERR (1 << 0)
#define GERROR_ERR_MASK 0xfd

#define ARM_SMMU_GERRORN 0x64

#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
#define ARM_SMMU_GERROR_IRQ_CFG2 0x74

#define ARM_SMMU_STRTAB_BASE 0x80
#define STRTAB_BASE_RA (1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT 6
#define STRTAB_BASE_ADDR_MASK 0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG 0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT 0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK 0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT 6
#define STRTAB_BASE_CFG_SPLIT_MASK 0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT 16
#define STRTAB_BASE_CFG_FMT_MASK 0x3
#define STRTAB_BASE_CFG_FMT_LINEAR (0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL (1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE 0x90
#define ARM_SMMU_CMDQ_PROD 0x98
#define ARM_SMMU_CMDQ_CONS 0x9c

#define ARM_SMMU_EVTQ_BASE 0xa0
#define ARM_SMMU_EVTQ_PROD 0x100a8
#define ARM_SMMU_EVTQ_CONS 0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc

#define ARM_SMMU_PRIQ_BASE 0xc0
#define ARM_SMMU_PRIQ_PROD 0x100c8
#define ARM_SMMU_PRIQ_CONS 0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT 2
#define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT 4
#define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT 0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG (1U << 31)
#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p) ((q)->base +			\
		     Q_IDX(q, p) * (q)->ent_dwords)

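/*
 * Worked example (editorial illustration, not part of the original
 * driver): for a queue with max_n_shift == 2 there are four slots. A
 * pointer value of 0x5 gives Q_IDX == 1 (0x5 & 0x3) and Q_WRP == 0x4
 * (0x5 & 0x4). Equal indices with differing wrap bits mean the
 * producer has lapped the consumer (queue full); equal indices and
 * equal wrap bits mean the queue is empty.
 */
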
#define Q_BASE_RWA (1UL << 62)
#define Q_BASE_ADDR_SHIFT 5
#define Q_BASE_ADDR_MASK 0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT 0
#define Q_BASE_LOG2SIZE_MASK 0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8

#define STRTAB_L1_DESC_DWORDS 1
#define STRTAB_L1_DESC_SPAN_SHIFT 0
#define STRTAB_L1_DESC_SPAN_MASK 0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT 6
#define STRTAB_L1_DESC_L2PTR_MASK 0x3ffffffffffUL

#define STRTAB_STE_DWORDS 8
#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT 1
#define STRTAB_STE_0_CFG_MASK 0x7UL
#define STRTAB_STE_0_CFG_ABORT (0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS (4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS (5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS (6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT 4
#define STRTAB_STE_0_S1FMT_LINEAR (0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT 6
#define STRTAB_STE_0_S1CTXPTR_MASK 0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT 59
#define STRTAB_STE_0_S1CDMAX_MASK 0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC 0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
#define STRTAB_STE_1_S1C_CACHE_WT 2UL
#define STRTAB_STE_1_S1C_CACHE_WB 3UL
#define STRTAB_STE_1_S1C_SH_NSH 0UL
#define STRTAB_STE_1_S1C_SH_OSH 2UL
#define STRTAB_STE_1_S1C_SH_ISH 3UL
#define STRTAB_STE_1_S1CIR_SHIFT 2
#define STRTAB_STE_1_S1COR_SHIFT 4
#define STRTAB_STE_1_S1CSH_SHIFT 6

#define STRTAB_STE_1_S1STALLD (1UL << 27)

#define STRTAB_STE_1_EATS_ABT 0UL
#define STRTAB_STE_1_EATS_TRANS 1UL
#define STRTAB_STE_1_EATS_S1CHK 2UL
#define STRTAB_STE_1_EATS_SHIFT 28

#define STRTAB_STE_1_STRW_NSEL1 0UL
#define STRTAB_STE_1_STRW_EL2 2UL
#define STRTAB_STE_1_STRW_SHIFT 30

#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44

#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
#define STRTAB_STE_2_VTCR_MASK 0x7ffffUL
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
#define STRTAB_STE_2_S2R (1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT 4
#define STRTAB_STE_3_S2TTB_MASK 0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS 8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT 0
#define ARM64_TCR_T0SZ_SHIFT 0
#define ARM64_TCR_T0SZ_MASK 0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT 6
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT 14
#define ARM64_TCR_EPD0_SHIFT 7
#define ARM64_TCR_EPD0_MASK 0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT 30
#define ARM64_TCR_EPD1_SHIFT 23
#define ARM64_TCR_EPD1_MASK 0x1UL

#define CTXDESC_CD_0_ENDI (1UL << 15)
#define CTXDESC_CD_0_V (1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT 32
#define ARM64_TCR_IPS_SHIFT 32
#define ARM64_TCR_IPS_MASK 0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT 38
#define ARM64_TCR_TBI0_SHIFT 37
#define ARM64_TCR_TBI0_MASK 0x1UL

#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT 47
#define CTXDESC_CD_0_ASET_SHARED (0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE (1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT 48
#define CTXDESC_CD_0_ASID_MASK 0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT 4
#define CTXDESC_CD_1_TTB0_MASK 0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT 0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)

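/*
 * Example expansion (editorial illustration, not part of the original
 * driver): ARM_SMMU_TCR2CD(tcr, TG0) becomes
 *
 *	(((tcr) >> 14 & 0x3) << 6)
 *
 * i.e. the TG0 field is extracted from bits [15:14] of the CPU's TCR
 * and deposited at bit 6 of context descriptor word 0.
 */
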
/* Command queue */
#define CMDQ_ENT_DWORDS 2
#define CMDQ_MAX_SZ_SHIFT 8

#define CMDQ_ERR_SHIFT 24
#define CMDQ_ERR_MASK 0x7f
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2

#define CMDQ_0_OP_SHIFT 0
#define CMDQ_0_OP_MASK 0xffUL
#define CMDQ_0_SSV (1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT 32
#define CMDQ_PREFETCH_1_SIZE_SHIFT 0
#define CMDQ_PREFETCH_1_ADDR_MASK ~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT 32
#define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT 0
#define CMDQ_CFGI_1_RANGE_MASK 0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT 32
#define CMDQ_TLBI_0_ASID_SHIFT 48
#define CMDQ_TLBI_1_LEAF (1UL << 0)
#define CMDQ_TLBI_1_VA_MASK ~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT 12
#define CMDQ_PRI_0_SSID_MASK 0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT 32
#define CMDQ_PRI_0_SID_MASK 0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT 0
#define CMDQ_PRI_1_GRPID_MASK 0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT 12
#define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT 12
#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS 4
#define EVTQ_MAX_SZ_SHIFT 7

#define EVTQ_0_ID_SHIFT 0
#define EVTQ_0_ID_MASK 0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS 2
#define PRIQ_MAX_SZ_SHIFT 8

#define PRIQ_0_SID_SHIFT 0
#define PRIQ_0_SID_MASK 0xffffffffUL
#define PRIQ_0_SSID_SHIFT 32
#define PRIQ_0_SSID_MASK 0xfffffUL
#define PRIQ_0_PERM_PRIV (1UL << 58)
#define PRIQ_0_PERM_EXEC (1UL << 59)
#define PRIQ_0_PERM_READ (1UL << 60)
#define PRIQ_0_PERM_WRITE (1UL << 61)
#define PRIQ_0_PRG_LAST (1UL << 62)
#define PRIQ_0_SSID_V (1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT 0
#define PRIQ_1_PRG_IDX_MASK 0x1ffUL
#define PRIQ_1_ADDR_SHIFT 12
#define PRIQ_1_ADDR_MASK 0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8 opcode;
	bool substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG 0x1
		struct {
			u32 sid;
			u8 size;
			u64 addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE 0x3
		#define CMDQ_OP_CFGI_ALL 0x4
		struct {
			u32 sid;
			union {
				bool leaf;
				u8 span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID 0x11
		#define CMDQ_OP_TLBI_NH_VA 0x12
		#define CMDQ_OP_TLBI_EL2_ALL 0x20
		#define CMDQ_OP_TLBI_S12_VMALL 0x28
		#define CMDQ_OP_TLBI_S2_IPA 0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL 0x30
		struct {
			u16 asid;
			u16 vmid;
			bool leaf;
			u64 addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP 0x41
		struct {
			u32 sid;
			u32 ssid;
			u16 grpid;
			enum pri_resp resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC 0x46
	};
};

struct arm_smmu_queue {
	int irq; /* Wired interrupt */

	__le64 *base;
	dma_addr_t base_dma;
	u64 q_base;

	size_t ent_dwords;
	u32 max_n_shift;
	u32 prod;
	u32 cons;

	u32 __iomem *prod_reg;
	u32 __iomem *cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue q;
	spinlock_t lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue q;
	u32 max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8 span;

	__le64 *l2ptr;
	dma_addr_t l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64 *cdptr;
	dma_addr_t cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16 asid;
		u64 ttbr;
		u64 tcr;
		u64 mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16 vmid;
	u64 vttbr;
	u64 vtcr;
};

struct arm_smmu_strtab_ent {
	bool valid;

	bool bypass; /* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg *s1_cfg;
	struct arm_smmu_s2_cfg *s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64 *strtab;
	dma_addr_t strtab_dma;
	struct arm_smmu_strtab_l1_desc *l1_desc;
	unsigned int num_l1_ents;

	u64 strtab_base;
	u32 strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device *dev;
	void __iomem *base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
#define ARM_SMMU_FEAT_TT_LE (1 << 2)
#define ARM_SMMU_FEAT_TT_BE (1 << 3)
#define ARM_SMMU_FEAT_PRI (1 << 4)
#define ARM_SMMU_FEAT_ATS (1 << 5)
#define ARM_SMMU_FEAT_SEV (1 << 6)
#define ARM_SMMU_FEAT_MSI (1 << 7)
#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
	u32 features;

#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
	u32 options;

	struct arm_smmu_cmdq cmdq;
	struct arm_smmu_evtq evtq;
	struct arm_smmu_priq priq;

	int gerr_irq;

	unsigned long ias; /* IPA */
	unsigned long oas; /* PA */
	unsigned long pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS (1 << 16)
	unsigned int asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS (1 << 16)
	unsigned int vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int ssid_bits;
	unsigned int sid_bits;

	struct arm_smmu_strtab_cfg strtab_cfg;
};

/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *domain;
	int num_sids;
	u32 *sids;
	struct arm_smmu_strtab_ent ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct mutex init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;

	enum arm_smmu_domain_stage stage;
	union {
		struct arm_smmu_s1_cfg s1_cfg;
		struct arm_smmu_s2_cfg s2_cfg;
	};

	struct iommu_domain domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -ENOSPC;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
		[CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}

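/*
 * Usage sketch (editorial illustration, not part of the original
 * driver): a caller invalidates the TLB entries for an ASID and waits
 * for completion like so:
 *
 *	struct arm_smmu_cmdq_ent cmd = {
 *		.opcode		= CMDQ_OP_TLBI_NH_ASID,
 *		.tlbi.asid	= asid,
 *	};
 *
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 *	cmd.opcode = CMDQ_OP_CMD_SYNC;
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 */
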
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
						<< STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

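/*
 * Size arithmetic (editorial illustration, not part of the original
 * driver): with STRTAB_SPLIT == 8 and STRTAB_STE_DWORDS == 8, each L2
 * table above is 1 << (8 + 3 + 3) == 16KiB, i.e. 256 STEs of 64 bytes
 * each, matching the "256 lazy entries per table" comment at the top
 * of the file.
 */
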
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

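/*
 * Behavioural note (editorial illustration, not part of the original
 * driver): arm_smmu_tlb_inv_range_nosync() issues one TLBI command per
 * granule, so unmapping 8KiB of 4KiB pages emits two commands (for
 * iova and iova + 0x1000); the completing CMD_SYNC is only issued
 * later, by arm_smmu_tlb_sync().
 */
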
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	smmu_group->ste.bypass = true;
	if (arm_smmu_install_ste_for_group(smmu_group) < 0)
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		arm_smmu_detach_dev(dev);

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;

	/*
	 * FIXME: This should always be "false" once we have IOMMU-backed
	 * DMA ops for all devices behind the SMMU.
	 */
	smmu_group->ste.bypass	= domain->type == IOMMU_DOMAIN_DMA;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (ret < 0)
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct platform_device *smmu_pdev;
	struct arm_smmu_device *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	smmu_pdev = of_find_device_by_node(of_node);
	if (smmu_pdev)
		smmu = platform_get_drvdata(smmu_pdev);

	of_node_put(of_node);
	return smmu;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_remove_dev;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_remove_dev;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			goto out_put_group;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_remove_dev;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_remove_dev;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_remove_dev;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;

out_put_group:
	iommu_group_put(group);
	return 0;

out_remove_dev:
	iommu_group_remove_device(dev);
	iommu_group_put(group);
	return ret;
}

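/*
 * Editorial illustration of the SID == RID assumption made in
 * arm_smmu_add_device() above: a function at PCI address 0000:03:00.1
 * has RID == (bus << 8 | devfn) == 0x0301, so it would use STE 0x301
 * unless firmware supplies a different mapping.
 */
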
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= pci_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}

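/*
 * Size arithmetic (editorial illustration, not part of the original
 * driver): for the command queue, with max_n_shift ==
 * CMDQ_MAX_SZ_SHIFT (8) and CMDQ_ENT_DWORDS == 2, the allocation above
 * is qsz = ((1 << 8) * 2) << 3 == 4KiB of DMA-coherent memory.
 */
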
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}

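/*
 * Worked example (editorial illustration, not part of the original
 * driver): for sid_bits == 32, size above is 20 - (0 + 3) == 17, so
 * cfg->num_l1_ents == 128k (the "128k L1 entries" case described at
 * the top of the file) and l1size == 1MiB; coverage is 17 + 8 == 25
 * bits of SID, so the 2-level coverage warning fires.
 */
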
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (private TLBs, record invalid SIDs, E2H regime) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
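
/*
 * Probe the hardware by reading the ID registers: IDR0 for feature bits,
 * IDR1 for queue/StreamID/SubstreamID sizes and IDR5 for page sizes and
 * the output address range. Anything we cannot drive (unsupported table
 * format or endianness, embedded tables/queues) fails the probe with
 * -ENXIO.
 */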
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
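
	/*
	 * arm_smmu_ops is shared by every SMMUv3 instance in the system,
	 * so the page sizes advertised to the IOMMU core are seeded by the
	 * first device we probe and widened to the union of all instances
	 * thereafter.
	 */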
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);

	return 0;
}
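
/*
 * Device-tree binding: a single MMIO region of at least 128K plus
 * optional named interrupts. An illustrative node follows (the addresses
 * and IRQ numbers are made up; consult the binding document for the real
 * constraints):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *	};
 */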
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address: resource_size() already includes the end address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines (optional; MSIs may be used instead) */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	return arm_smmu_device_reset(smmu);
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
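
/*
 * Register at subsys_initcall time so the SMMU comes up before its client
 * devices probe. If the device tree contains no SMMUv3 nodes, bail out
 * early without registering the driver or claiming the PCI bus iommu ops.
 */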
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");