drivers/iommu/exynos-iommu.c
iommu/exynos: Return proper errors from getting clocks
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
                           ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
                          ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMUs support 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in a system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized to the proper value (0 or 4)
 * on the first SYSMMU probe.
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

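/*
 * A 32-bit IOVA thus decomposes as follows for the two-level page table:
 * bits [31:20] index the 4096-entry lv1 (section) table, bits [19:12]
 * index the 256-entry lv2 table and bits [11:0] are the in-page offset.
 */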
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
        return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
        return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
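/*
 * The two least significant bits of an entry encode its type: in the lv1
 * table, 2 marks a 1MiB section, 1 a link to an lv2 table and 0 or 3 a
 * fault; in the lv2 table, 1 marks a 64KiB large page, a set bit 1 a
 * 4KiB small page and 0 a fault.
 */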

#define CTRL_ENABLE     0x5
#define CTRL_BLOCK      0x7
#define CTRL_DISABLE    0x0

#define CFG_LRU         0x1
#define CFG_QOS(n)      ((n & 0xF) << 7)
#define CFG_ACGEN       (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL      (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE   (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL            0x000
#define REG_MMU_CFG             0x004
#define REG_MMU_STATUS          0x008
#define REG_MMU_VERSION         0x034

#define MMU_MAJ_VER(val)        ((val) >> 7)
#define MMU_MIN_VER(val)        ((val) & 0x7F)
#define MMU_RAW_VER(reg)        (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
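/* e.g. MAKE_MMU_VER(3, 3) == 0x183: MMU_MAJ_VER() == 3, MMU_MIN_VER() == 3 */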

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH           0x00C
#define REG_MMU_FLUSH_ENTRY     0x010
#define REG_PT_BASE_ADDR        0x014
#define REG_INT_STATUS          0x018
#define REG_INT_CLEAR           0x01C

#define REG_PAGE_FAULT_ADDR     0x024
#define REG_AW_FAULT_ADDR       0x028
#define REG_AR_FAULT_ADDR       0x02C
#define REG_DEFAULT_SLAVE_ADDR  0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN      0x00C
#define REG_V5_MMU_FLUSH_ALL    0x010
#define REG_V5_MMU_FLUSH_ENTRY  0x014
#define REG_V5_INT_STATUS       0x060
#define REG_V5_INT_CLEAR        0x064
#define REG_V5_FAULT_AR_VA      0x070
#define REG_V5_FAULT_AW_VA      0x080

#define has_sysmmu(dev)         (dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
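/*
 * Unmapped lv1 entries are set to ZERO_LV2LINK, a link to a shared
 * all-fault lv2 table, instead of a plain fault section entry; this
 * keeps the v3.3 FLPD cache from caching fault section entries (see the
 * workaround comments in alloc_lv2entry() and exynos_iommu_domain_alloc()).
 */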

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
        return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
        return (sysmmu_pte_t *)phys_to_virt(
                                lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
        unsigned int bit;       /* bit number in STATUS register */
        unsigned short addr_reg; /* register to read VA fault address */
        const char *name;       /* human readable fault name */
        unsigned int type;      /* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree that are bound to the given master device, and is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
        struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
        struct iommu_domain *domain;    /* domain this device is attached to */
};

/*
 * This structure is the Exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers of all master
 * devices that have been attached to this domain, as well as the page
 * tables of the I/O address space defined by it. It is usually referenced
 * by the 'domain' pointer.
 */
struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.domain_node */
        sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
        short *lv2entcnt;       /* free lv2 entry counter for each section */
        spinlock_t lock;        /* lock for modifying list of clients */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
        struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all the data of a single SYSMMU controller. This
 * includes hw resources such as registers and clocks, pointers and list
 * nodes connecting it to the other structures, internal state and
 * parameters read from the device tree. It is usually referenced by the
 * 'data' pointer.
 */
struct sysmmu_drvdata {
        struct device *sysmmu;          /* SYSMMU controller device */
        struct device *master;          /* master device (owner) */
        void __iomem *sfrbase;          /* our registers */
        struct clk *clk;                /* SYSMMU's clock */
        struct clk *aclk;               /* SYSMMU's aclk clock */
        struct clk *pclk;               /* SYSMMU's pclk clock */
        struct clk *clk_master;         /* master's device clock */
        int activations;                /* number of calls to sysmmu_enable */
        spinlock_t lock;                /* lock for modifying state */
        struct exynos_iommu_domain *domain; /* domain we belong to */
        struct list_head domain_node;   /* node for domain clients list */
        struct list_head owner_node;    /* node for owner controllers list */
        phys_addr_t pgtable;            /* assigned page table structure */
        unsigned int version;           /* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /*
         * Return true if the System MMU was not active previously and it
         * needs to be initialized.
         */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* Return true if the System MMU needs to be disabled. */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}

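/*
 * Writing CTRL_BLOCK stalls the SYSMMU so that its registers can be
 * updated safely; bit 0 of REG_MMU_STATUS reads back as 1 once the block
 * has taken effect. sysmmu_block() polls that bit a bounded number of
 * times and, on timeout, re-enables translation and returns false.
 */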
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
        int i = 120;

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(data);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(0x1, data->sfrbase + REG_MMU_FLUSH);
        else
                writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                sysmmu_iova_t iova, unsigned int num_inv)
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                if (MMU_MAJ_VER(data->version) < 5)
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_MMU_FLUSH_ENTRY);
                else
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
                iova += SPAGE_SIZE;
        }
}

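/*
 * v5 hardware takes the page table base as a page frame number so that a
 * 36-bit physical address fits into the 32-bit register; older versions
 * take the physical address directly.
 */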
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
        if (MMU_MAJ_VER(data->version) < 5)
                writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
        else
                writel(pgd >> PAGE_SHIFT,
                       data->sfrbase + REG_V5_PT_BASE_PFN);

        __sysmmu_tlb_invalidate(data);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
        u32 ver;

        clk_enable(data->clk_master);
        clk_enable(data->clk);
        clk_enable(data->pclk);
        clk_enable(data->aclk);

        ver = readl(data->sfrbase + REG_MMU_VERSION);

        /* controllers on some SoCs don't report a proper version */
        if (ver == 0x80000001u)
                data->version = MAKE_MMU_VER(1, 0);
        else
                data->version = MMU_RAW_VER(ver);

        dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
                MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

        clk_disable(data->aclk);
        clk_disable(data->pclk);
        clk_disable(data->clk);
        clk_disable(data->clk_master);
}

static void show_fault_information(struct sysmmu_drvdata *data,
                                   const struct sysmmu_fault_info *finfo,
                                   sysmmu_iova_t fault_addr)
{
        sysmmu_pte_t *ent;

        dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
                finfo->name, fault_addr, &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
        dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The SYSMMU is in a blocked state when the interrupt occurred. */
        struct sysmmu_drvdata *data = dev_id;
        const struct sysmmu_fault_info *finfo;
        unsigned int i, n, itype;
        sysmmu_iova_t fault_addr = -1;
        unsigned short reg_status, reg_clear;
        int ret = -ENOSYS;

        WARN_ON(!is_sysmmu_active(data));

        if (MMU_MAJ_VER(data->version) < 5) {
                reg_status = REG_INT_STATUS;
                reg_clear = REG_INT_CLEAR;
                finfo = sysmmu_faults;
                n = ARRAY_SIZE(sysmmu_faults);
        } else {
                reg_status = REG_V5_INT_STATUS;
                reg_clear = REG_V5_INT_CLEAR;
                finfo = sysmmu_v5_faults;
                n = ARRAY_SIZE(sysmmu_v5_faults);
        }

        spin_lock(&data->lock);

        clk_enable(data->clk_master);

        itype = __ffs(readl(data->sfrbase + reg_status));
        for (i = 0; i < n; i++, finfo++)
                if (finfo->bit == itype)
                        break;
        /* unknown/unsupported fault */
        BUG_ON(i == n);

        /* print debug message */
        fault_addr = readl(data->sfrbase + finfo->addr_reg);
        show_fault_information(data, finfo, fault_addr);

        if (data->domain)
                ret = report_iommu_fault(&data->domain->domain,
                                        data->master, fault_addr, finfo->type);
        /* the fault was not recovered by the fault handler */
        BUG_ON(ret != 0);

        writel(1 << itype, data->sfrbase + reg_clear);

        sysmmu_unblock(data);

        clk_disable(data->clk_master);

        spin_unlock(&data->lock);

        return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
        clk_enable(data->clk_master);

        writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
        writel(0, data->sfrbase + REG_MMU_CFG);

        clk_disable(data->aclk);
        clk_disable(data->pclk);
        clk_disable(data->clk);
        clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
        bool disabled;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);

        disabled = set_sysmmu_inactive(data);

        if (disabled) {
                data->pgtable = 0;
                data->domain = NULL;

                __sysmmu_disable_nocount(data);

                dev_dbg(data->sysmmu, "Disabled\n");
        } else {
                dev_dbg(data->sysmmu, "%d times left to disable\n",
                                        data->activations);
        }

        spin_unlock_irqrestore(&data->lock, flags);

        return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
        unsigned int cfg;

        if (data->version <= MAKE_MMU_VER(3, 1))
                cfg = CFG_LRU | CFG_QOS(15);
        else if (data->version <= MAKE_MMU_VER(3, 2))
                cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
        else
                cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

        writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
        clk_enable(data->clk_master);
        clk_enable(data->clk);
        clk_enable(data->pclk);
        clk_enable(data->aclk);

        writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

        __sysmmu_init_config(data);

        __sysmmu_set_ptbase(data, data->pgtable);

        writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

        clk_disable(data->clk_master);
}

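/*
 * Returns 0 when the SYSMMU has just been enabled, 1 when it was already
 * enabled with the same page table, and -EBUSY when it is already
 * enabled with a different page table.
 */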
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
                           struct exynos_iommu_domain *domain)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (set_sysmmu_active(data)) {
                data->pgtable = pgtable;
                data->domain = domain;

                __sysmmu_enable_nocount(data);

                dev_dbg(data->sysmmu, "Enabled\n");
        } else {
                ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

                dev_dbg(data->sysmmu, "already enabled\n");
        }

        if (WARN_ON(ret < 0))
                set_sysmmu_inactive(data); /* decrement count */

        spin_unlock_irqrestore(&data->lock, flags);

        return ret;
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                                            sysmmu_iova_t iova)
{
        unsigned long flags;

        clk_enable(data->clk_master);

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                if (data->version >= MAKE_MMU_VER(3, 3))
                        __sysmmu_tlb_invalidate_entry(data, iova, 1);
        }
        spin_unlock_irqrestore(&data->lock, flags);

        clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                                        sysmmu_iova_t iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&data->lock, flags);
        if (is_sysmmu_active(data)) {
                unsigned int num_inv = 1;

                clk_enable(data->clk_master);

                /*
                 * Because the L2 TLB is 8-way set-associative with 64
                 * sets, one invalidation is required per set that may
                 * cache the mapping:
                 * 4KB page: 1 invalidation
                 * 64KB page: 16 invalidations (one of 16 consecutive sets)
                 * 1MB page: 64 invalidations (may be cached in any set)
                 */
                if (MMU_MAJ_VER(data->version) == 2)
                        num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

                if (sysmmu_block(data)) {
                        __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
                        sysmmu_unblock(data);
                }
                clk_disable(data->clk_master);
        } else {
                dev_dbg(data->master,
                        "disabled. Skipping TLB invalidation @ %#x\n", iova);
        }
        spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct device *dev = &pdev->dev;
        struct sysmmu_drvdata *data;
        struct resource *res;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->sfrbase = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->sfrbase))
                return PTR_ERR(data->sfrbase);

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(dev, "Unable to find IRQ resource\n");
                return irq;
        }

        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                dev_name(dev), data);
        if (ret) {
                dev_err(dev, "Unable to register handler of irq %d\n", irq);
                return ret;
        }

        data->clk = devm_clk_get(dev, "sysmmu");
        if (PTR_ERR(data->clk) == -ENOENT)
                data->clk = NULL;
        else if (IS_ERR(data->clk))
                return PTR_ERR(data->clk);
        ret = clk_prepare(data->clk);
        if (ret)
                return ret;

        data->aclk = devm_clk_get(dev, "aclk");
        if (PTR_ERR(data->aclk) == -ENOENT)
                data->aclk = NULL;
        else if (IS_ERR(data->aclk))
                return PTR_ERR(data->aclk);
        ret = clk_prepare(data->aclk);
        if (ret)
                return ret;

        data->pclk = devm_clk_get(dev, "pclk");
        if (PTR_ERR(data->pclk) == -ENOENT)
                data->pclk = NULL;
        else if (IS_ERR(data->pclk))
                return PTR_ERR(data->pclk);
        ret = clk_prepare(data->pclk);
        if (ret)
                return ret;

        if (!data->clk && (!data->aclk || !data->pclk)) {
                dev_err(dev, "Failed to get device clock(s)!\n");
                return -ENOSYS;
        }

        data->clk_master = devm_clk_get(dev, "master");
        if (PTR_ERR(data->clk_master) == -ENOENT)
                data->clk_master = NULL;
        else if (IS_ERR(data->clk_master))
                return PTR_ERR(data->clk_master);
        ret = clk_prepare(data->clk_master);
        if (ret)
                return ret;

        data->sysmmu = dev;
        spin_lock_init(&data->lock);

        platform_set_drvdata(pdev, data);

        __sysmmu_get_version(data);
        if (PG_ENT_SHIFT < 0) {
                if (MMU_MAJ_VER(data->version) < 5)
                        PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
                else
                        PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
        }

        pm_runtime_enable(dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "suspend\n");
        if (is_sysmmu_active(data)) {
                __sysmmu_disable_nocount(data);
                pm_runtime_put(dev);
        }
        return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev);

        dev_dbg(dev, "resume\n");
        if (is_sysmmu_active(data)) {
                pm_runtime_get_sync(dev);
                __sysmmu_enable_nocount(data);
        }
        return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
        { .compatible   = "samsung,exynos-sysmmu", },
        { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
        .probe  = exynos_sysmmu_probe,
        .driver = {
                .name           = "exynos-sysmmu",
                .of_match_table = sysmmu_of_match,
                .pm             = &sysmmu_pm_ops,
                .suppress_bind_attrs = true,
        }
};

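/*
 * Page tables live in ordinary cacheable memory, so every CPU update of
 * an entry is bracketed by dma_sync_single_*() calls against dma_dev to
 * make the new value visible to the (non-coherent) SYSMMU.
 */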
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                DMA_TO_DEVICE);
        *ent = val;
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
        struct exynos_iommu_domain *domain;
        dma_addr_t handle;
        int i;

        /* Check if correct PTE offsets are initialized */
        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA) {
                if (iommu_get_dma_cookie(&domain->domain) != 0)
                        goto err_pgtable;
        } else if (type != IOMMU_DOMAIN_UNMANAGED) {
                goto err_pgtable;
        }

        domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!domain->pgtable)
                goto err_dma_cookie;

        domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!domain->lv2entcnt)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                domain->pgtable[i + 0] = ZERO_LV2LINK;
                domain->pgtable[i + 1] = ZERO_LV2LINK;
                domain->pgtable[i + 2] = ZERO_LV2LINK;
                domain->pgtable[i + 3] = ZERO_LV2LINK;
                domain->pgtable[i + 4] = ZERO_LV2LINK;
                domain->pgtable[i + 5] = ZERO_LV2LINK;
                domain->pgtable[i + 6] = ZERO_LV2LINK;
                domain->pgtable[i + 7] = ZERO_LV2LINK;
        }

        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));

        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
        INIT_LIST_HEAD(&domain->clients);

        domain->domain.geometry.aperture_start = 0;
        domain->domain.geometry.aperture_end   = ~0UL;
        domain->domain.geometry.force_aperture = true;

        return &domain->domain;

err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&domain->domain);
err_pgtable:
        kfree(domain);
        return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&domain->clients));

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (__sysmmu_disable(data))
                        data->master = NULL;
                list_del_init(&data->domain_node);
        }

        spin_unlock_irqrestore(&domain->lock, flags);

        if (iommu_domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(iommu_domain);

        dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
                         DMA_TO_DEVICE);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(domain->pgtable + i)) {
                        phys_addr_t base = lv2table_base(domain->pgtable + i);

                        dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                                         DMA_TO_DEVICE);
                        kmem_cache_free(lv2table_kmem_cache,
                                        phys_to_virt(base));
                }

        free_pages((unsigned long)domain->pgtable, 2);
        free_pages((unsigned long)domain->lv2entcnt, 1);
        kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                    struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
        bool found = false;

        if (!has_sysmmu(dev) || owner->domain != iommu_domain)
                return;

        spin_lock_irqsave(&domain->lock, flags);
        list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
                if (data->master == dev) {
                        if (__sysmmu_disable(data)) {
                                data->master = NULL;
                                list_del_init(&data->domain_node);
                        }
                        pm_runtime_put(data->sysmmu);
                        found = true;
                }
        }
        spin_unlock_irqrestore(&domain->lock, flags);

        owner->domain = NULL;

        if (found)
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
                                        __func__, &pagetable);
        else
                dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                   struct device *dev)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
        int ret = -ENODEV;

        if (!has_sysmmu(dev))
                return -ENODEV;

        if (owner->domain)
                exynos_iommu_detach_device(owner->domain, dev);

        list_for_each_entry(data, &owner->controllers, owner_node) {
                pm_runtime_get_sync(data->sysmmu);
                ret = __sysmmu_enable(data, pagetable, domain);
                if (ret >= 0) {
                        data->master = dev;

                        spin_lock_irqsave(&domain->lock, flags);
                        list_add_tail(&data->domain_node, &domain->clients);
                        spin_unlock_irqrestore(&domain->lock, flags);
                }
        }

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
                                        __func__, &pagetable);
                return ret;
        }

        owner->domain = iommu_domain;
        dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
                __func__, &pagetable, (ret == 0) ? "" : ", again");

        return ret;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
                return ERR_PTR(-EADDRINUSE);
        }

        if (lv1ent_fault(sent)) {
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);

                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return ERR_PTR(-ENOMEM);

                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
                 * FLPD cache may cache the address of zero_l2_table. This
                 * function replaces the zero_l2_table with new L2 page table
                 * to write valid mappings.
                 * Accessing the valid area may cause page fault since FLPD
                 * cache may still cache zero_l2_table for the valid area
                 * instead of new L2 page table that has the mapping
                 * information of the valid area.
                 * Thus any replacement of zero_l2_table with other valid L2
                 * page table must involve FLPD cache invalidation for System
                 * MMU v3.3.
                 * FLPD cache invalidation is performed with TLB invalidation
                 * by VPN without blocking. It is safe to invalidate TLB without
                 * blocking because the target address of TLB invalidation is
                 * not currently mapped.
                 */
                if (need_flush_flpd_cache) {
                        struct sysmmu_drvdata *data;

                        spin_lock(&domain->lock);
                        list_for_each_entry(data, &domain->clients, domain_node)
                                sysmmu_tlb_invalidate_flpdcache(data, iova);
                        spin_unlock(&domain->lock);
                }
        }

        return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
                          phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
                return -EADDRINUSE;
        }

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES) {
                        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                                iova);
                        return -EADDRINUSE;
                }

                kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
                *pgcnt = 0;
        }

        update_pte(sent, mk_lv1ent_sect(paddr));

        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
                struct sysmmu_drvdata *data;
                /*
                 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
                 * entry by speculative prefetch of SLPD which has no mapping.
                 */
                list_for_each_entry(data, &domain->clients, domain_node)
                        sysmmu_tlb_invalidate_flpdcache(data, iova);
        }
        spin_unlock(&domain->lock);

        return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;

                update_pte(pent, mk_lv2ent_spage(paddr));
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                dma_addr_t pent_base = virt_to_phys(pent);

                dma_sync_single_for_cpu(dma_dev, pent_base,
                                        sizeof(*pent) * SPAGES_PER_LPAGE,
                                        DMA_TO_DEVICE);
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (WARN_ON(!lv2ent_fault(pent))) {
                                if (i > 0)
                                        memset(pent - i, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                dma_sync_single_for_device(dma_dev, pent_base,
                                           sizeof(*pent) * SPAGES_PER_LPAGE,
                                           DMA_TO_DEVICE);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while a faulty page table entry is cached,
 * the System MMU reports a page fault when the cached entry is hit, even
 * though the entry has since been updated to a valid one.
 * To prevent caching faulty page table entries which may later be updated
 * to valid entries, the virtual memory manager must apply the following
 * workarounds.
 *
 * Any two consecutive I/O virtual address regions must have a hole of at
 * least 128KiB to prevent misbehavior of System MMU 3.x (workaround for
 * the h/w bug).
 *
 * Precisely, the start address of an I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs stricter workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                            unsigned long l_iova, phys_addr_t paddr, size_t size,
                            int prot)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(domain, entry, iova, paddr,
                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;

                pent = alloc_lv2entry(domain, entry, iova,
                                      &domain->lv2entcnt[lv1ent_offset(iova)]);

                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
                        ret = lv2set_page(pent, paddr, size,
                                       &domain->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
                                              sysmmu_iova_t iova, size_t size)
{
        struct sysmmu_drvdata *data;
        unsigned long flags;

        spin_lock_irqsave(&domain->lock, flags);

        list_for_each_entry(data, &domain->clients, domain_node)
                sysmmu_tlb_invalidate_entry(data, iova, size);

        spin_unlock_irqrestore(&domain->lock, flags);
}

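/*
 * Unmaps a single section, large page or small page at @l_iova and
 * returns the size actually unmapped (0 on error); the IOMMU core calls
 * this repeatedly until the whole requested range has been unmapped.
 */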
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                                 unsigned long l_iova, size_t size)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
        sysmmu_pte_t *ent;
        size_t err_pgsize;
        unsigned long flags;

        BUG_ON(domain->pgtable == NULL);

        spin_lock_irqsave(&domain->pgtablelock, flags);

        ent = section_entry(domain->pgtable, iova);

        if (lv1ent_section(ent)) {
                if (WARN_ON(size < SECT_SIZE)) {
                        err_pgsize = SECT_SIZE;
                        goto err;
                }

                /* workaround for h/w bug in System MMU v3.3 */
                update_pte(ent, ZERO_LV2LINK);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                update_pte(ent, 0);
                size = SPAGE_SIZE;
                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        if (WARN_ON(size < LPAGE_SIZE)) {
                err_pgsize = LPAGE_SIZE;
                goto err;
        }

        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
                                sizeof(*ent) * SPAGES_PER_LPAGE,
                                DMA_TO_DEVICE);
        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
        dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
                                   sizeof(*ent) * SPAGES_PER_LPAGE,
                                   DMA_TO_DEVICE);
        size = LPAGE_SIZE;
        domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        exynos_iommu_tlb_invalidate_entry(domain, iova, size);

        return size;
err:
        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
                __func__, size, iova, err_pgsize);

        return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                                          dma_addr_t iova)
{
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&domain->pgtablelock, flags);

        entry = section_entry(domain->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&domain->pgtablelock, flags);

        return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                group = iommu_group_alloc();

        return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;

        if (!has_sysmmu(dev))
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
        if (!has_sysmmu(dev))
                return;

        iommu_group_remove_device(dev);
}

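/*
 * Called once for each SYSMMU phandle in a master device's "iommus" DT
 * property; allocates the owner structure on first use and appends the
 * referenced controller to the owner's list.
 */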
static int exynos_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *spec)
{
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
        struct sysmmu_drvdata *data;

        if (!sysmmu)
                return -ENODEV;

        data = platform_get_drvdata(sysmmu);
        if (!data)
                return -ENODEV;

        if (!owner) {
                owner = kzalloc(sizeof(*owner), GFP_KERNEL);
                if (!owner)
                        return -ENOMEM;

                INIT_LIST_HEAD(&owner->controllers);
                dev->archdata.iommu = owner;
        }

        list_add_tail(&data->owner_node, &owner->controllers);
        return 0;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .domain_free = exynos_iommu_domain_free,
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map = exynos_iommu_map,
        .unmap = exynos_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = exynos_iommu_iova_to_phys,
        .device_group = get_device_iommu_group,
        .add_device = exynos_iommu_add_device,
        .remove_device = exynos_iommu_remove_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
        .of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
        int ret;

        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
                pr_err("%s: Failed to create kmem cache\n", __func__);
                return -ENOMEM;
        }

        ret = platform_driver_register(&exynos_sysmmu_driver);
        if (ret) {
                pr_err("%s: Failed to register driver\n", __func__);
                goto err_reg_driver;
        }

        zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
        if (zero_lv2_table == NULL) {
                pr_err("%s: Failed to allocate zero level2 page table\n",
                        __func__);
                ret = -ENOMEM;
                goto err_zero_lv2;
        }

        ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
        if (ret) {
                pr_err("%s: Failed to register exynos-iommu driver.\n",
                                                                __func__);
                goto err_set_iommu;
        }

        init_done = true;

        return 0;
err_set_iommu:
        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
        platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
        kmem_cache_destroy(lv2table_kmem_cache);
        return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
        struct platform_device *pdev;

        if (!init_done)
                exynos_iommu_init();

        pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        /*
         * Use the first registered sysmmu device for performing DMA mapping
         * operations on the IOMMU page tables (CPU cache flushes).
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        of_iommu_set_ops(np, &exynos_iommu_ops);
        return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
                 exynos_iommu_of_setup);