/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
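
/*
 * For example, with max_n_shift == 8 (a 256-entry queue), a pointer of
 * 0x105 decodes as index 0x05 with the wrap bit (0x100) set. A producer
 * at 0x105 and a consumer at 0x005 point at the same slot but differ in
 * wrap, meaning the producer has lapped the consumer and the queue is
 * full; matching index *and* matching wrap means the queue is empty.
 */
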
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
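
/*
 * With the values above, the L1 table is 2^20 bytes, i.e. 128k
 * descriptors of STRTAB_L1_DESC_DWORDS (one) dword each, and a split
 * of 8 gives 256 STEs per L2 table: one table per PCI bus when the
 * SID is assumed equal to the RID (bus number in bits [15:8]).
 */
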
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
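
/*
 * For instance, ARM_SMMU_TCR2CD(tcr, TG0) expands to
 *
 *	(((tcr) >> ARM64_TCR_TG0_SHIFT & ARM64_TCR_TG0_MASK)
 *		<< CTXDESC_CD_0_TCR_TG0_SHIFT)
 *
 * which extracts TCR_EL1.TG0 from bits [15:14] of the CPU register and
 * deposits it at bits [7:6] of CD dword 0, where the SMMU expects it.
 */
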
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			bool			leaf;
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16			asid;
		u64			ttbr;
		u64			tcr;
		u64			mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass; /* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};

/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
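
/*
 * Note that the increments above operate on (Q_WRP | Q_IDX) as a single
 * integer, so stepping past the last slot carries into the wrap bit
 * naturally: with max_n_shift == 8, 0x1ff + 1 == 0x200, which Q_WRP and
 * Q_IDX decode as index 0 with the wrap bit toggled, while Q_OVF keeps
 * the overflow flag from the old pointer.
 */
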
static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state. The erroneous
	 * command sits at the consumer pointer, not at the error index.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u32 until;
	u64 cmd[CMDQ_ENT_DWORDS];
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock(&smmu->cmdq.lock);
	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
		/*
		 * Keep the queue locked, otherwise the producer could wrap
		 * twice and we could see a future consumer pointer that looks
		 * like it's behind us.
		 */
		if (queue_poll_cons(q, until, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock(&smmu->cmdq.lock);
}

/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

		dev_info(smmu->dev, "event 0x%02x received:\n", id);
		for (i = 0; i < ARRAY_SIZE(evt); ++i)
			dev_info(smmu->dev, "\t0x%016llx\n",
				 (unsigned long long)evt[i]);
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;

	/*
	 * Not much we can do on overflow, so scream and pretend we're
	 * trying harder.
	 */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	while (!queue_remove_raw(q, evt)) {
		u32 sid, ssid;
		u16 grpid;
		bool ssv, last;

		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
		ssv = evt[0] & PRIQ_0_SSID_V;
		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
		last = evt[0] & PRIQ_0_PRG_LAST;
		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

		dev_info(smmu->dev, "unexpected PRI request received:\n");
		dev_info(smmu->dev,
			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
			 sid, ssid, grpid, last ? "L" : "",
			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

		if (last) {
			struct arm_smmu_cmdq_ent cmd = {
				.opcode			= CMDQ_OP_PRI_RESP,
				.substream_valid	= ssv,
				.pri			= {
					.sid	= sid,
					.ssid	= ssid,
					.grpid	= grpid,
					.resp	= PRI_RESP_DENY,
				},
			};

			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		}
	}

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;

	/* PRIQ overflow indicates a programming error */
	if (queue_sync_prod(q) == -EOVERFLOW)
		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	else if (queue_empty(q))
		ret = IRQ_NONE;

	return ret;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR) {
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
		arm_smmu_priq_handler(irq, smmu);
	}

	if (active & GERROR_MSI_EVTQ_ABT_ERR) {
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
		arm_smmu_evtq_handler(irq, smmu);
	}

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (IS_ERR_VALUE(asid))
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (IS_ERR_VALUE(vmid))
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static struct iommu_ops arm_smmu_ops;

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (IS_ERR_VALUE(ret))
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	smmu_group = iommu_group_get_iommudata(group);
	iommu_group_put(group);
	return smmu_group;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
{
	int i;
	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
	struct arm_smmu_device *smmu = smmu_group->smmu;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	for (i = 0; i < smmu_group->num_sids; ++i) {
		u32 sid = smmu_group->sids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
	}

	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	if (!smmu_group)
		return -ENOENT;

	/* Already attached to a different domain? */
	if (smmu_group->domain && smmu_group->domain != smmu_domain)
		return -EEXIST;

	smmu = smmu_group->smmu;
	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Group already attached to this domain? */
	if (smmu_group->domain)
		goto out_unlock;

	smmu_group->domain	= smmu_domain;
	smmu_group->ste.bypass	= false;

	ret = arm_smmu_install_ste_for_group(smmu_group);
	if (IS_ERR_VALUE(ret))
		smmu_group->domain = NULL;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);

	BUG_ON(!smmu_domain);
	BUG_ON(!smmu_group);

	mutex_lock(&smmu_domain->init_mutex);
	BUG_ON(smmu_group->domain != smmu_domain);

	smmu_group->ste.bypass = true;
	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
		dev_warn(dev, "failed to install bypass STE\n");

	smmu_group->domain = NULL;
	mutex_unlock(&smmu_domain->init_mutex);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
{
	*(u32 *)sidp = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
{
	struct device_node *of_node;
	struct platform_device *smmu_pdev;
	struct arm_smmu_device *smmu = NULL;
	struct pci_bus *bus = pdev->bus;

	/* Walk up to the root bus */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* Follow the "iommus" phandle from the host controller */
	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
	if (!of_node)
		return NULL;

	/* See if we can find an SMMU corresponding to the phandle */
	smmu_pdev = of_find_device_by_node(of_node);
	if (smmu_pdev)
		smmu = platform_get_drvdata(smmu_pdev);

	of_node_put(of_node);
	return smmu;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	u32 sid, *sids;
	struct pci_dev *pdev;
	struct iommu_group *group;
	struct arm_smmu_group *smmu_group;
	struct arm_smmu_device *smmu;

	/* We only support PCI, for now */
	if (!dev_is_pci(dev))
		return -ENODEV;

	pdev = to_pci_dev(dev);
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	smmu_group = iommu_group_get_iommudata(group);
	if (!smmu_group) {
		smmu = arm_smmu_get_for_pci_dev(pdev);
		if (!smmu) {
			ret = -ENOENT;
			goto out_remove_dev;
		}

		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
		if (!smmu_group) {
			ret = -ENOMEM;
			goto out_remove_dev;
		}

		smmu_group->ste.valid	= true;
		smmu_group->smmu	= smmu;
		iommu_group_set_iommudata(group, smmu_group,
					  __arm_smmu_release_pci_iommudata);
	} else {
		smmu = smmu_group->smmu;
	}

	/* Assume SID == RID until firmware tells us otherwise */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < smmu_group->num_sids; ++i) {
		/* If we already know about this SID, then we're done */
		if (smmu_group->sids[i] == sid)
			goto out_put_group;
	}

	/* Check the SID is in range of the SMMU and our stream table */
	if (!arm_smmu_sid_in_range(smmu, sid)) {
		ret = -ERANGE;
		goto out_remove_dev;
	}

	/* Ensure l2 strtab is initialised */
	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		ret = arm_smmu_init_l2_strtab(smmu, sid);
		if (ret)
			goto out_remove_dev;
	}

	/* Resize the SID array for the group */
	smmu_group->num_sids++;
	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
			GFP_KERNEL);
	if (!sids) {
		smmu_group->num_sids--;
		ret = -ENOMEM;
		goto out_remove_dev;
	}

	/* Add the new SID */
	sids[smmu_group->num_sids - 1] = sid;
	smmu_group->sids = sids;

out_put_group:
	iommu_group_put(group);
	return 0;

out_remove_dev:
	iommu_group_remove_device(dev);
	iommu_group_put(group);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= pci_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq,
						arm_smmu_evtq_handler,
						arm_smmu_evtq_thread,
						0, "arm-smmu-v3-evtq", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (IS_ERR_VALUE(ret))
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq,
							arm_smmu_priq_handler,
							arm_smmu_priq_thread,
							0, "arm-smmu-v3-priq",
							smmu);
			if (IS_ERR_VALUE(ret))
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

2292 static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
2296 struct arm_smmu_cmdq_ent cmd;
2298 /* Clear CR0 and sync (disables SMMU and queue processing) */
2299 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2300 if (reg & CR0_SMMUEN)
2301 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2303 ret = arm_smmu_device_disable(smmu);
2307 /* CR1 (table and queue memory attributes) */
2308 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2309 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2310 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2311 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2312 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2313 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2314 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2316 /* CR2 (random crap) */
2317 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2318 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2321 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2322 smmu->base + ARM_SMMU_STRTAB_BASE);
2323 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2324 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2327 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2328 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2329 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2331 enables = CR0_CMDQEN;
2332 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2335 dev_err(smmu->dev, "failed to enable command queue\n");
2339 /* Invalidate any cached configuration */
2340 cmd.opcode = CMDQ_OP_CFGI_ALL;
2341 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2342 cmd.opcode = CMDQ_OP_CMD_SYNC;
2343 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2345 /* Invalidate any stale TLB entries */
2346 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2347 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2348 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2351 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2352 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2353 cmd.opcode = CMDQ_OP_CMD_SYNC;
2354 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
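
	/*
	 * Only CR0.CMDQEN is set at this point, so the invalidation
	 * above cannot race with incoming translations: SMMUEN is not
	 * written until the end of this function.
	 */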

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
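
/*
 * Read the hardware ID registers (IDR0, IDR1, IDR5) and derive the
 * supported feature set, queue sizes, ASID/VMID/SID widths, page sizes
 * and output address size. This only records capabilities (in *smmu
 * and in arm_smmu_ops.pgsize_bitmap); the device itself is programmed
 * later, by arm_smmu_device_reset().
 */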
static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent;
	unsigned long pgsize_bitmap = 0;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The dma-coherent property is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	coherent = of_dma_is_coherent(smmu->dev->of_node);
	if (coherent)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
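
	/*
	 * sid_bits bounds the stream table: a large SID space together
	 * with ARM_SMMU_FEAT_2_LVL_STRTAB is what motivates the
	 * two-level stream table layout used elsewhere in this driver.
	 */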

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
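
	/*
	 * The block sizes ORed in above are the block mappings implied
	 * by each translation granule: 4K gives 2M (level-2) and 1G
	 * (level-1) blocks, 16K gives 32M blocks and 64K gives 512M
	 * blocks.
	 */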

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);

	return 0;
}
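
/*
 * A minimal sketch of a device-tree node matched by this driver. The
 * unit address, reg and interrupt specifiers are illustrative only;
 * the "eventq", "priq", "cmdq-sync" and "gerror" names are the ones
 * looked up via platform_get_irq_byname() below, and the MMIO region
 * must be at least 128K, as checked against SZ_128K:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *	};
 */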
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	parse_driver_options(smmu);

	/* Probe the h/w */
	ret = arm_smmu_device_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	return arm_smmu_device_reset(smmu);
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}
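
/*
 * Register at subsys_initcall time so the SMMU is initialised before
 * the devices it translates for start probing. arm_smmu_init()
 * deliberately returns 0 when no matching device-tree node exists, so
 * the driver stays out of the way on systems without an SMMUv3.
 */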
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");