/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

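/*
 * Simple bitmap allocator for context banks and SMRs. The
 * test_and_set_bit() loop makes allocation safe against concurrent
 * callers without requiring an additional lock.
 */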
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

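/*
 * Invalidate a range of TLB entries one granule at a time, without
 * waiting for completion: by VA+ASID for stage 1, by IPA for stage 2 on
 * SMMUv2, and (lacking anything finer-grained) by VMID on SMMUv1
 * stage 2. The caller is expected to follow up with a TLB sync.
 */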
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;	/* make room for the ASID in the low bits */
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

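/*
 * Context fault handler: read and decode the fault status/syndrome
 * registers, give the fault a chance to be handled via
 * report_iommu_fault(), then clear the FSR and retry or terminate any
 * stalled transaction.
 */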
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

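/*
 * Global fault handler: report and clear the global fault status. These
 * faults typically indicate a configuration problem (e.g. a transaction
 * with an unmatched stream ID) rather than a translation fault on a
 * context bank.
 */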
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

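/*
 * Program a context bank (CBA2R/CBAR, TTBRs, TTBCR, MAIRs) from the
 * io-pgtable configuration, then enable translation by writing SCTLR.
 */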
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

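/*
 * Finalise a domain on a particular SMMU: choose a translation stage and
 * page table format, claim a context bank, allocate the page tables and
 * wire up the context fault interrupt.
 */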
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

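/*
 * Allocate and program one Stream Match Register for each of the
 * master's stream IDs. SMRs are matched exactly for now (mask = 0),
 * so stream IDs cannot yet share a single SMR.
 */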
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

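/*
 * Point the S2CR for each of the master's stream IDs (or its SMRs, when
 * stream matching is in use) at the domain's context bank, so that the
 * master's transactions are translated rather than bypassed.
 */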
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

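/*
 * Attach flow: find the SMMU that owns this master, finalise the domain
 * on it if necessary, then route the master's streams to the domain's
 * context bank. A device can only be attached to one domain at a time,
 * and a domain cannot span multiple SMMUs.
 */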
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

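/*
 * Resolve an IOVA using the hardware ATS1PR address translation
 * operation rather than walking the page tables in software. Only
 * usable for stage-1 contexts; on timeout we fall back to the
 * io-pgtable software walker.
 */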
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

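/*
 * Bring the SMMU to a known state: clear stale faults, invalidate all
 * SMRs, place every S2CR in bypass, disable all context banks and
 * invalidate the TLBs before writing the final sCR0 configuration.
 */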
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS,
			gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

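/* Convert the OAS/IAS/UBS field encoding from the ID registers to bits */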
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

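/*
 * Probe path: map the register space, gather global/context IRQs, probe
 * the hardware configuration from the ID registers, register the
 * mmu-masters described in the DT and finally reset the SMMU.
 */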
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");