drivers/iommu/mtk_iommu_v1.c
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR                    0x000

#define F_ALL_INVLD                             0x2
#define F_MMU_INV_RANGE                         0x1
#define F_INVLD_EN0                             BIT(0)
#define F_INVLD_EN1                             BIT(1)

#define F_MMU_FAULT_VA_MSK                      0xfffff000
#define MTK_PROTECT_PA_ALIGN                    128

#define REG_MMU_CTRL_REG                        0x210
#define F_MMU_CTRL_COHERENT_EN                  BIT(8)
#define REG_MMU_IVRP_PADDR                      0x214
#define REG_MMU_INT_CONTROL                     0x220
#define F_INT_TRANSLATION_FAULT                 BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT              BIT(1)
#define F_INT_INVALID_PA_FAULT                  BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT           BIT(3)
#define F_INT_TABLE_WALK_FAULT                  BIT(4)
#define F_INT_TLB_MISS_FAULT                    BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW             BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW            BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)              (((prot) & 0x3) << 5)
#define F_INT_CLR_BIT                           BIT(12)

#define REG_MMU_FAULT_ST                        0x224
#define REG_MMU_FAULT_VA                        0x228
#define REG_MMU_INVLD_PA                        0x22C
#define REG_MMU_INT_ID                          0x388
#define REG_MMU_INVALIDATE                      0x5c0
#define REG_MMU_INVLD_START_A                   0x5c4
#define REG_MMU_INVLD_END_A                     0x5c8

#define REG_MMU_INV_SEL                         0x5d8
#define REG_MMU_STANDARD_AXI_MODE               0x5e8

#define REG_MMU_DCM                             0x5f0
#define F_MMU_DCM_ON                            BIT(1)
#define REG_MMU_CPE_DONE                        0x60c
#define F_DESC_VALID                            0x2
#define F_DESC_NONSEC                           BIT(3)
#define MT2701_M4U_TF_LARB(TF)                  (6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)                  (((TF) >> 8) & 0xF)
/* The MTK generation one IOMMU HW only supports 4K page mappings */
#define MT2701_IOMMU_PAGE_SHIFT                 12
#define MT2701_IOMMU_PAGE_SIZE                  (1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * The MTK m4u supports a 4GB iova address space and only 4K page
 * mappings, so the pagetable size is exactly 4M.
 */
#define MT2701_IOMMU_PGT_SIZE                   SZ_4M

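/*
 * Sizing note, derived from the constants above: a 4GB iova space of
 * 4K pages is (SZ_4G / SZ_4K) = 1M entries, each a 32-bit descriptor,
 * so 1M * sizeof(u32) = 4M. pgt_va in the domain below is that flat,
 * physically contiguous table, indexed by iova >> MT2701_IOMMU_PAGE_SHIFT.
 */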
struct mtk_iommu_domain {
        spinlock_t                      pgtlock; /* lock for page table */
        struct iommu_domain             domain;
        u32                             *pgt_va;
        dma_addr_t                      pgt_pa;
        struct mtk_iommu_data           *data;
};

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
        LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
        LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
        int i;

        for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
                if (id >= mt2701_m4u_in_larb[i])
                        return i;

        return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
        int larb = mt2701_m4u_to_larb(id);

        return id - mt2701_m4u_in_larb[larb];
}
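
/*
 * Decoding note: a client's global m4u port id is its larb's first-port
 * offset plus the port index within that larb. to_larb() therefore picks
 * the largest offset that does not exceed the id, and to_port() subtracts
 * it back out. For illustration only (not a real binding value): if
 * LARB1_PORT_OFFSET were 11, a global id of 14 would decode to larb 1,
 * port 3.
 */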

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
        writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                        data->base + REG_MMU_INV_SEL);
        writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
        wmb(); /* Make sure the full TLB flush has completed */
}

static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
                                unsigned long iova, size_t size)
{
        int ret;
        u32 tmp;

        writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                data->base + REG_MMU_INV_SEL);
        writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
                data->base + REG_MMU_INVLD_START_A);
        writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
                data->base + REG_MMU_INVLD_END_A);
        writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

        ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                tmp, tmp != 0, 10, 100000);
        if (ret) {
                dev_warn(data->dev,
                         "Partial TLB flush timed out, falling back to full flush\n");
                mtk_iommu_tlb_flush_all(data);
        }
        /* Clear the CPE status */
        writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;

        /* Read error information from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
        fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

        fault_iova &= F_MMU_FAULT_VA_MSK;
        fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
        regval = readl_relaxed(data->base + REG_MMU_INT_ID);
        fault_larb = MT2701_M4U_TF_LARB(regval);
        fault_port = MT2701_M4U_TF_PORT(regval);

        /*
         * The MTK v1 IOMMU HW cannot determine whether a fault was a
         * read or a write, so report every fault as a read fault.
         */
        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                        IOMMU_FAULT_READ))
                dev_err_ratelimited(data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
                        int_state, fault_iova, fault_pa,
                        fault_larb, fault_port);

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}
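
/*
 * Note: the full TLB flush at the end of the ISR is presumably there to
 * drop whatever translation the HW cached for the faulting iova, so a
 * stale entry cannot keep re-triggering the same fault.
 */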

static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
                portid = mt2701_m4u_to_port(fwspec->ids[i]);
                larb_mmu = &data->smi_imu.larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
        struct mtk_iommu_domain *dom = data->m4u_dom;

        spin_lock_init(&dom->pgtlock);

        dom->pgt_va = dma_zalloc_coherent(data->dev,
                                MT2701_IOMMU_PGT_SIZE,
                                &dom->pgt_pa, GFP_KERNEL);
        if (!dom->pgt_va)
                return -ENOMEM;

        writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

        dom->data = data;

        return 0;
}
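
/*
 * Note: dma_zalloc_coherent() hands back zeroed memory, so every
 * descriptor starts out with F_DESC_VALID clear; the table becomes live
 * as soon as its DMA address is written to REG_MMU_PT_BASE_ADDR by the
 * first attach.
 */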

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dom->data;

        dma_free_coherent(data->dev, MT2701_IOMMU_PGT_SIZE,
                        dom->pgt_va, dom->pgt_pa);
        kfree(dom);
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
        int ret;

        if (!data)
                return -ENODEV;

        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                ret = mtk_iommu_domain_finalise(data);
                if (ret) {
                        data->m4u_dom = NULL;
                        return ret;
                }
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
        unsigned long flags;
        unsigned int i;
        u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
        u32 pabase = (u32)paddr;
        int map_size = 0;

        spin_lock_irqsave(&dom->pgtlock, flags);
        for (i = 0; i < page_num; i++) {
                if (pgt_base_iova[i]) {
                        memset(pgt_base_iova, 0, i * sizeof(u32));
                        break;
                }
                pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
                pabase += MT2701_IOMMU_PAGE_SIZE;
                map_size += MT2701_IOMMU_PAGE_SIZE;
        }

        spin_unlock_irqrestore(&dom->pgtlock, flags);

        mtk_iommu_tlb_flush_range(dom->data, iova, size);

        return map_size == size ? 0 : -EEXIST;
}
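
/*
 * Mapping note: if any descriptor in the requested range is already
 * valid, the loop above rolls back the descriptors it has written so
 * far and the function returns -EEXIST; the TLB range is invalidated
 * either way.
 */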

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
        unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

        spin_lock_irqsave(&dom->pgtlock, flags);
        memset(pgt_base_iova, 0, page_num * sizeof(u32));
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        mtk_iommu_tlb_flush_range(dom->data, iova, size);

        return size;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        phys_addr_t pa;

        spin_lock_irqsave(&dom->pgtlock, flags);
        pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
        pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return pa;
}

static struct iommu_ops mtk_iommu_ops;

/*
 * The MTK generation one IOMMU HW supports only one iommu domain,
 * and all of its clients share the same iova address space.
 */
static int mtk_iommu_create_mapping(struct device *dev,
                                    struct of_phandle_args *args)
{
        struct mtk_iommu_data *data;
        struct platform_device *m4updev;
        struct dma_iommu_mapping *mtk_mapping;
        struct device *m4udev;
        int ret;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!dev->iommu_fwspec) {
                ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
                if (ret)
                        return ret;
        } else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
                return -EINVAL;
        }

        if (!dev->iommu_fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        ret = iommu_fwspec_add_ids(dev, args->args, 1);
        if (ret)
                return ret;

        data = dev->iommu_fwspec->iommu_priv;
        m4udev = data->dev;
        mtk_mapping = m4udev->archdata.iommu;
        if (!mtk_mapping) {
                /* The MTK iommu supports a 4GB iova address space. */
                mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
                                                0, 1ULL << 32);
                if (IS_ERR(mtk_mapping))
                        return PTR_ERR(mtk_mapping);

                m4udev->archdata.iommu = mtk_mapping;
        }

        ret = arm_iommu_attach_device(dev, mtk_mapping);
        if (ret)
                goto err_release_mapping;

        return 0;

err_release_mapping:
        arm_iommu_release_mapping(mtk_mapping);
        m4udev->archdata.iommu = NULL;
        return ret;
}
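
/*
 * For reference, a hypothetical client node would reference the m4u
 * with a single-cell specifier (matching the args_count == 1 check
 * above), e.g. "iommus = <&iommu (port id)>;". The cell is recorded
 * via iommu_fwspec_add_ids() and consumed later by mtk_iommu_config().
 */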

static int mtk_iommu_add_device(struct device *dev)
{
        struct of_phandle_args iommu_spec;
        struct of_phandle_iterator it;
        struct mtk_iommu_data *data;
        struct iommu_group *group;
        int err;

        of_for_each_phandle(&it, err, dev->of_node, "iommus",
                        "#iommu-cells", 0) {
                int count = of_phandle_iterator_args(&it, iommu_spec.args,
                                        MAX_PHANDLE_ARGS);
                iommu_spec.np = of_node_get(it.node);
                iommu_spec.args_count = count;

                mtk_iommu_create_mapping(dev, &iommu_spec);
                of_node_put(iommu_spec.np);
        }

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct mtk_iommu_data *data;

        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return;

        data = dev->iommu_fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TABLE_WALK_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_PFH_DMA_FIFO_OVERFLOW |
                F_INT_MISS_DMA_FIFO_OVERFLOW;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

        /* Protect memory: HW will write here on a translation fault */
        writel_relaxed(data->protect_base,
                        data->base + REG_MMU_IVRP_PADDR);

        writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static struct iommu_ops mtk_iommu_ops = {
        .domain_alloc   = mtk_iommu_domain_alloc,
        .domain_free    = mtk_iommu_domain_free,
        .attach_dev     = mtk_iommu_attach_device,
        .detach_dev     = mtk_iommu_detach_device,
        .map            = mtk_iommu_map,
        .unmap          = mtk_iommu_unmap,
        .map_sg         = default_iommu_map_sg,
        .iova_to_phys   = mtk_iommu_iova_to_phys,
        .add_device     = mtk_iommu_add_device,
        .remove_device  = mtk_iommu_remove_device,
        .device_group   = mtk_iommu_device_group,
        .pgsize_bitmap  = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};
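
/*
 * pgsize_bitmap note: ~0UL << MT2701_IOMMU_PAGE_SHIFT advertises every
 * power-of-two size from 4K upward, so the IOMMU core hands map()/unmap()
 * aligned power-of-two chunks and the driver walks each chunk in 4K
 * descriptors.
 */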

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2701-m4u", },
        {}
};

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind           = mtk_iommu_bind,
        .unbind         = mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data           *data;
        struct device                   *dev = &pdev->dev;
        struct resource                 *res;
        struct component_match          *match = NULL;
        struct of_phandle_args          larb_spec;
        struct of_phandle_iterator      it;
        void                            *protect;
        int                             larb_nr, ret, err;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->dev = dev;

        /* Protect memory. HW will access here on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
                        GFP_KERNEL | GFP_DMA);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        data->bclk = devm_clk_get(dev, "bclk");
        if (IS_ERR(data->bclk))
                return PTR_ERR(data->bclk);

        larb_nr = 0;
        of_for_each_phandle(&it, err, dev->of_node,
                        "mediatek,larbs", NULL, 0) {
                struct platform_device *plarbdev;
                int count = of_phandle_iterator_args(&it, larb_spec.args,
                                        MAX_PHANDLE_ARGS);

                if (count)
                        continue;

                larb_spec.np = of_node_get(it.node);
                if (!of_device_is_available(larb_spec.np))
                        continue;

                plarbdev = of_find_device_by_node(larb_spec.np);
                if (!plarbdev) {
                        plarbdev = of_platform_device_create(
                                                larb_spec.np, NULL,
                                                platform_bus_type.dev_root);
                        if (!plarbdev) {
                                of_node_put(larb_spec.np);
                                return -EPROBE_DEFER;
                        }
                }

                data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
                component_match_add_release(dev, &match, release_of,
                                            compare_of, larb_spec.np);
                larb_nr++;
        }

        data->smi_imu.larb_nr = larb_nr;

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}
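
/*
 * Probe-flow note: the "mediatek,larbs" walk above builds a component
 * match entry for every available SMI larb, so
 * component_master_add_with_match() defers binding until each larb
 * driver has probed; a larb whose platform device cannot be created
 * yields -EPROBE_DEFER instead.
 */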

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
        writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static struct platform_driver mtk_iommu_driver = {
        .probe  = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = mtk_iommu_of_ids,
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init m4u_init(void)
{
        return platform_driver_register(&mtk_iommu_driver);
}

static void __exit m4u_exit(void)
{
        platform_driver_unregister(&mtk_iommu_driver);
}

subsys_initcall(m4u_init);
module_exit(m4u_exit);

MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations");
MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>");
MODULE_LICENSE("GPL v2");