/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 1

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct list_head list;

	unsigned int num_utlbs;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

struct ipmmu_vmsa_iommu_priv {
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int num_utlbs;
	struct device *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
{
#if defined(CONFIG_ARM)
	return dev->archdata.iommu;
#else
	return dev->iommu_fwspec->iommu_priv;
#endif
}

static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p)
{
#if defined(CONFIG_ARM)
	dev->archdata.iommu = p;
#else
	dev->iommu_fwspec->iommu_priv = p;
#endif
}

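/*
 * Per-device private data lives in dev->archdata.iommu on 32-bit ARM and in
 * the generic iommu_fwspec otherwise; the two helpers above hide that
 * difference from the rest of the driver.
 */
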
#define TLB_LOOP_TIMEOUT 100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET 0x800

#define IM_CTX_SIZE 0x40

#define IMCTR 0x0000
#define IMCTR_TRE (1 << 17)
#define IMCTR_AFE (1 << 16)
#define IMCTR_RTSEL_MASK (3 << 4)
#define IMCTR_RTSEL_SHIFT 4
#define IMCTR_TREN (1 << 3)
#define IMCTR_INTEN (1 << 2)
#define IMCTR_FLUSH (1 << 1)
#define IMCTR_MMUEN (1 << 0)

#define IMCAAR 0x0004

#define IMTTBCR 0x0008
#define IMTTBCR_EAE (1 << 31)
#define IMTTBCR_PMB (1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
#define IMTTBCR_SH1_MASK (3 << 28)
#define IMTTBCR_ORGN1_NC (0 << 26)
#define IMTTBCR_ORGN1_WB_WA (1 << 26)
#define IMTTBCR_ORGN1_WT (2 << 26)
#define IMTTBCR_ORGN1_WB (3 << 26)
#define IMTTBCR_ORGN1_MASK (3 << 26)
#define IMTTBCR_IRGN1_NC (0 << 24)
#define IMTTBCR_IRGN1_WB_WA (1 << 24)
#define IMTTBCR_IRGN1_WT (2 << 24)
#define IMTTBCR_IRGN1_WB (3 << 24)
#define IMTTBCR_IRGN1_MASK (3 << 24)
#define IMTTBCR_TSZ1_MASK (7 << 16)
#define IMTTBCR_TSZ1_SHIFT 16
#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
#define IMTTBCR_SH0_MASK (3 << 12)
#define IMTTBCR_ORGN0_NC (0 << 10)
#define IMTTBCR_ORGN0_WB_WA (1 << 10)
#define IMTTBCR_ORGN0_WT (2 << 10)
#define IMTTBCR_ORGN0_WB (3 << 10)
#define IMTTBCR_ORGN0_MASK (3 << 10)
#define IMTTBCR_IRGN0_NC (0 << 8)
#define IMTTBCR_IRGN0_WB_WA (1 << 8)
#define IMTTBCR_IRGN0_WT (2 << 8)
#define IMTTBCR_IRGN0_WB (3 << 8)
#define IMTTBCR_IRGN0_MASK (3 << 8)
#define IMTTBCR_SL0_LVL_2 (0 << 4)
#define IMTTBCR_SL0_LVL_1 (1 << 4)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT 0

#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
#define IMBUSCR_BUSSEL_CCI (1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
#define IMBUSCR_BUSSEL_MASK (3 << 0)

#define IMTTLBR0 0x0010
#define IMTTUBR0 0x0014
#define IMTTLBR1 0x0018
#define IMTTUBR1 0x001c

#define IMSTR 0x0020
#define IMSTR_ERRLVL_MASK (3 << 12)
#define IMSTR_ERRLVL_SHIFT 12
#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
#define IMSTR_ERRCODE_MASK (7 << 8)
#define IMSTR_MHIT (1 << 4)
#define IMSTR_ABORT (1 << 2)
#define IMSTR_PF (1 << 1)
#define IMSTR_TF (1 << 0)

#define IMMAIR0 0x0028
#define IMMAIR1 0x002c
#define IMMAIR_ATTR_MASK 0xff
#define IMMAIR_ATTR_DEVICE 0x04
#define IMMAIR_ATTR_NC 0x44
#define IMMAIR_ATTR_WBRWA 0xff
#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
#define IMMAIR_ATTR_IDX_NC 0
#define IMMAIR_ATTR_IDX_WBRWA 1
#define IMMAIR_ATTR_IDX_DEV 2

#define IMEAR 0x0030

#define IMPCTR 0x0200
#define IMPSTR 0x0208
#define IMPEAR 0x020c
#define IMPMBA(n) (0x0280 + ((n) * 4))
#define IMPMBD(n) (0x02c0 + ((n) * 4))

#define IMUCTR(n) (0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN (1 << 31)
#define IMUCTR_FIXADD_MASK (0xff << 16)
#define IMUCTR_FIXADD_SHIFT 16
#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
#define IMUCTR_TTSEL_PMB (8 << 4)
#define IMUCTR_TTSEL_MASK (15 << 4)
#define IMUCTR_FLUSH (1 << 1)
#define IMUCTR_MMUEN (1 << 0)

#define IMUASID(n) (0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK (0xff << 8)
#define IMUASID_ASID8_SHIFT 8
#define IMUASID_ASID0_MASK (0xff << 0)
#define IMUASID_ASID0_SHIFT 0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

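/*
 * Each hardware context owns a bank of IM_CTX_SIZE (0x40) bytes of
 * registers, so context N's copy of a register lives at N * 0x40 + reg:
 * for example, context 1's IMCTR sits at 0x0040.
 */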
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
				"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

static struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

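/*
 * Since the IPMMU cannot invalidate individual TLB entries, tlb_add_flush
 * is a no-op and both the flush_all and sync callbacks fall back to a
 * full-TLB invalidate through the IMCTR flush bit.
 */
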
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
	if (ret != IPMMU_CTX_MAX) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

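/*
 * Set up translation for a domain: build LPAE stage-1 page tables through
 * io-pgtable, claim a free hardware context, then program the context
 * registers (TTBR, TTBCR, MAIR, IMBUSCR) and enable the MMU.
 */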
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->dev;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu, domain);
	if (ret == IPMMU_CTX_MAX) {
		free_io_pgtable_ops(domain->iop);
		return -EBUSY;
	}

	domain->context_id = ret;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < IPMMU_CTX_MAX; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_device *mmu = priv->mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < priv->num_utlbs; ++i)
		ipmmu_utlb_enable(domain, priv->utlbs[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < priv->num_utlbs; ++i)
		ipmmu_utlb_disable(domain, priv->utlbs[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
			    unsigned int *utlbs, unsigned int num_utlbs)
{
	unsigned int i;

	for (i = 0; i < num_utlbs; ++i) {
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(dev->of_node, "iommus",
						 "#iommu-cells", i, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);

		if (args.np != mmu->dev->of_node || args.args_count != 1)
			return -EINVAL;

		utlbs[i] = args.args[0];
	}

	return 0;
}

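/*
 * A hypothetical device tree fragment matching what the parser above
 * expects (the node names and unit address are illustrative, not taken
 * from a real board file); each specifier cell names one microTLB of the
 * referenced IPMMU instance:
 *
 *	ipmmu: mmu@e6740000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		#iommu-cells = <1>;
 *	};
 *
 *	master {
 *		iommus = <&ipmmu 13>;
 *	};
 *
 * For this master, ipmmu_find_utlbs() checks that the phandle points at
 * this IPMMU instance and that exactly one cell was given, then stores 13
 * in utlbs[0].
 */
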
static int ipmmu_init_platform_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv;
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int i;
	int num_utlbs;
	int ret = -ENODEV;

	/* Find the master corresponding to the device. */
	num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
					       "#iommu-cells");
	if (num_utlbs < 0)
		return -ENODEV;

	utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	spin_lock(&ipmmu_devices_lock);
	list_for_each_entry(mmu, &ipmmu_devices, list) {
		ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
		if (!ret) {
			/*
			 * TODO Take a reference to the MMU to protect
			 * against device removal.
			 */
			break;
		}
	}
	spin_unlock(&ipmmu_devices_lock);

	if (ret < 0)
		goto error;

	for (i = 0; i < num_utlbs; ++i) {
		if (utlbs[i] >= mmu->num_utlbs) {
			ret = -EINVAL;
			goto error;
		}
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto error;
	}

	priv->mmu = mmu;
	priv->utlbs = utlbs;
	priv->num_utlbs = num_utlbs;
	priv->dev = dev;
	set_priv(dev, priv);
	return 0;

error:
	kfree(utlbs);
	return ret;
}

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)

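/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA, the ARM DMA mapping layer
 * provides the DMA ops through an arm_iommu mapping, so only unmanaged
 * domains are exposed here; the CONFIG_IOMMU_DMA variant further below
 * offers IOMMU_DOMAIN_DMA instead.
 */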
static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	return __ipmmu_domain_alloc(type);
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = NULL;
	struct iommu_group *group;
	int ret;

	if (to_priv(dev)) {
		dev_warn(dev, "IOMMU driver already assigned to device %s\n",
			 dev_name(dev));
		return -EINVAL;
	}

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		ret = PTR_ERR(group);
		goto error;
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		group = NULL;
		goto error;
	}

	ret = ipmmu_init_platform_device(dev);
	if (ret < 0)
		goto error;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	mmu = to_priv(dev)->mmu;
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu)
		arm_iommu_release_mapping(mmu->mapping);

	if (!IS_ERR_OR_NULL(group))
		iommu_group_remove_device(dev);

	if (to_priv(dev)) {
		kfree(to_priv(dev)->utlbs);
		kfree(to_priv(dev));
		set_priv(dev, NULL);
	}

	return ret;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);

	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);

	kfree(priv->utlbs);
	kfree(priv);

	set_priv(dev, NULL);
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

#endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */

#ifdef CONFIG_IOMMU_DMA

static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
static LIST_HEAD(ipmmu_slave_devices);

static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain)
			iommu_get_dma_cookie(io_domain);
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
{
	switch (io_domain->type) {
	case IOMMU_DOMAIN_DMA:
		iommu_put_dma_cookie(io_domain);
		/* fall-through */
	default:
		ipmmu_domain_free(io_domain);
		break;
	}
}

static int ipmmu_add_device_dma(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate().
	 * We may get called with dev->iommu_fwspec set to NULL.
	 */
	if (!fwspec || !fwspec->iommu_priv)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	spin_lock(&ipmmu_slave_devices_lock);
	list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
	spin_unlock(&ipmmu_slave_devices_lock);

	return 0;
}

static void ipmmu_remove_device_dma(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);

	spin_lock(&ipmmu_slave_devices_lock);
	list_del(&priv->list);
	spin_unlock(&ipmmu_slave_devices_lock);

	iommu_group_remove_device(dev);
}

static struct device *ipmmu_find_sibling_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
	bool found = false;

	spin_lock(&ipmmu_slave_devices_lock);

	list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
		if (priv == sibling_priv)
			continue;
		if (sibling_priv->mmu == priv->mmu) {
			found = true;
			break;
		}
	}

	spin_unlock(&ipmmu_slave_devices_lock);

	return found ? sibling_priv->dev : NULL;
}

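/*
 * Devices behind the same IPMMU instance end up sharing a single
 * iommu_group: the first device gets a fresh generic group and later
 * siblings join it through the lookup above.
 */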
static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
{
	struct iommu_group *group;
	struct device *sibling;

	sibling = ipmmu_find_sibling_device(dev);
	if (sibling)
		group = iommu_group_get(sibling);
	if (!sibling || IS_ERR(group))
		group = generic_device_group(dev);

	return group;
}

static int ipmmu_of_xlate_dma(struct device *dev,
			      struct of_phandle_args *spec)
{
	/*
	 * If the IPMMU device is disabled in DT then return error
	 * to make sure the of_iommu code does not install ops
	 * even though the iommu device is disabled.
	 */
	if (!of_device_is_available(spec->np))
		return -ENODEV;

	return ipmmu_init_platform_device(dev);
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc_dma,
	.domain_free = ipmmu_domain_free_dma,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device_dma,
	.remove_device = ipmmu_remove_device_dma,
	.device_group = ipmmu_find_group_dma,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate_dma,
};

#endif /* CONFIG_IOMMU_DMA */

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < 4; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	mmu->base += IM_NS_ALIAS_OFFSET;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ found\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
			       dev_name(&pdev->dev), mmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
		return ret;
	}

	ipmmu_device_reset(mmu);

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */
	spin_lock(&ipmmu_devices_lock);
	list_add(&mmu->list, &ipmmu_devices);
	spin_unlock(&ipmmu_devices_lock);

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	spin_lock(&ipmmu_devices_lock);
	list_del(&mmu->list);
	spin_unlock(&ipmmu_devices_lock);

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	arm_iommu_release_mapping(mmu->mapping);
#endif

	ipmmu_device_reset(mmu);

	return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa", },
	{ }
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

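/*
 * Registration order matters: masters can only be attached once ipmmu_ops
 * is installed on the platform bus, and the iommu_present() check below
 * avoids overriding ops that another IOMMU driver already installed.
 */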
static int __init ipmmu_init(void)
{
	int ret;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);

	return 0;
}

static void __exit ipmmu_exit(void)
{
	platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");