/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"
/*
 * A device driver needs to create address mappings between:
 *
 *      - iommu/device address
 *      - physical address
 *      - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *      | iova/                 mapping         iommu_                  page
 *      | da    pa      va      (d)-(p)-(v)     function                type
 *    ---------------------------------------------------------------------
 *    1 | c     c       c       1 - 1 - 1       _kmap()   / _kunmap()   s
 *    2 | c     c,a     c       1 - 1 - 1       _kmalloc()/ _kfree()    s
 *    3 | c     d       c       1 - n - 1       _vmap()   / _vunmap()   s
 *    4 | c     d,a     c       1 - n - 1       _vmalloc()/ _vfree()    n*
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 *      '*':    not yet, but feasible.
 */
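/*
 * Illustrative usage sketch (not part of the original file): pattern 4
 * above, assuming the caller already holds a 'struct iommu *obj' handle
 * obtained elsewhere.  Passing da == 0 lets the allocator choose the device
 * address (IOVMF_DA_ANON); a nonzero da requests that exact address
 * (IOVMF_DA_FIXED).
 *
 *      u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return -ENOMEM;
 *      memset(da_to_va(obj, da), 0, SZ_1M);    (touch it via the mpu mapping)
 *      ...
 *      iommu_vfree(obj, da);                   (tears down mapping and pages)
 */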
static struct kmem_cache *iovm_area_cachep;
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
        unsigned int i, total = 0;
        struct scatterlist *sg;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                bytes = sg_dma_len(sg);
                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
#define sgtable_ok(x)   (!!sgtable_len(x))
 * calculate the optimal number of sg elements from total bytes, based on
 * the supported iommu superpage sizes
static unsigned int sgtable_nents(size_t bytes)
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
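/*
 * Worked example (illustrative, not from the original source): for
 * bytes = 17MB + 64KB, the greedy walk above yields 1 x 16MB + 1 x 1MB +
 * 1 x 64KB = 3 sg entries, instead of the 4368 entries a plain 4KB split
 * would need.
 */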
/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
        unsigned int nr_entries;
        struct sg_table *sgt;
                return ERR_PTR(-EINVAL);
        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);
        /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
        if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
                nr_entries = sgtable_nents(bytes);
                        return ERR_PTR(-EINVAL);
                nr_entries = bytes / PAGE_SIZE;
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);
        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
        pr_debug("%s: sgt:%p\n", __func__, sgt);
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;
        mtype = get_mem_type(MT_DEVICE);
                return ERR_PTR(-EINVAL);
        total = sgtable_len(sgt);
                return ERR_PTR(-EINVAL);
        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
                return ERR_PTR(-ENOMEM);
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                bytes = sg_dma_len(sg);
                BUG_ON(bytes != PAGE_SIZE);
                err = ioremap_page(va, pa, mtype);
        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        return ERR_PTR(-EAGAIN);
static inline void vunmap_sg(const void *va)
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
        struct iovm_struct *tmp;
        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        len = tmp->da_end - tmp->da_start;
                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
 * find_iovm_area - find iovma which includes @da
 * @da: iommu device virtual address
 * Find the existing iovma starting at @da
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
        struct iovm_struct *area;
        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);
EXPORT_SYMBOL_GPL(find_iovm_area);
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;
                return ERR_PTR(-EINVAL);
        alignment = PAGE_SIZE;
        if (flags & IOVMF_DA_ANON) {
                 * Reserve the first page for NULL
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        if (list_empty(&obj->mmap))
        list_for_each_entry(tmp, &obj->mmap, list) {
                if (prev_end >= start)
                if (start + bytes < tmp->da_start)
                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end + 1, alignment);
                prev_end = tmp->da_end;
        if ((start > prev_end) && (ULONG_MAX - start >= bytes))
        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);
        return ERR_PTR(-EINVAL);
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
                return ERR_PTR(-ENOMEM);
        new->da_start = start;
        new->da_end = start + bytes;
         * keep ascending order of iovmas
                list_add_tail(&new->list, &tmp->list);
                list_add(&new->list, &obj->mmap);
        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);
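/*
 * Illustrative note (not from the original source): the loop above is a
 * first-fit walk of the address-ordered iovma list.  For example, with
 * existing areas [0x1000-0x5000) and [0x9000-0xa000), an anonymous
 * 0x2000-byte request lands at 0x6000, i.e. roundup(0x5000 + 1, PAGE_SIZE),
 * the start of the first hole large enough to hold it.
 */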
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
        BUG_ON(!obj || !area);
        bytes = area->da_end - area->da_start;
        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);
        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
 * da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 * Returns the mpu virtual address which corresponds to a given device virtual address
void *da_to_va(struct iommu *obj, u32 da)
        struct iovm_struct *area;
        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
        mutex_unlock(&obj->mmap_lock);
EXPORT_SYMBOL_GPL(da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
        struct scatterlist *sg;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                const size_t bytes = PAGE_SIZE;
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                pg = vmalloc_to_page(va);
                sg_set_page(sg, pg, bytes, 0);
        va_end = _va + PAGE_SIZE * i;
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
        struct scatterlist *sg;
        va = phys_to_virt(pa);
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                bytes = iopgsz_max(len);
                BUG_ON(!iopgsz_ok(bytes));
                sg_set_buf(sg, phys_to_virt(pa), bytes);
                 * 'pa' is contiguous (linear).
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
        struct scatterlist *sg;
        u32 da = new->da_start;
        BUG_ON(!sgtable_ok(sgt));
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct iotlb_entry e;
                bytes = sg_dma_len(sg);
                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
        for_each_sg(sgt->sgl, sg, i, j) {
                bytes = iopgtable_clear_entry(obj, da);
                BUG_ON(!iopgsz_ok(bytes));
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
        size_t total = area->da_end - area->da_start;
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
        start = area->da_start;
                bytes = iopgtable_clear_entry(obj, start);
                        dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                                __func__, start, bytes, area->flags);
                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;
        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
        sgt = (struct sg_table *)area->sgt;
        unmap_iovm_area(obj, area);
        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);
        free_iovm_area(obj, area);
        mutex_unlock(&obj->mmap_lock);
static u32 map_iommu_region(struct iommu *obj, u32 da,
                const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
        struct iovm_struct *new;
        mutex_lock(&obj->mmap_lock);
        new = alloc_iovm_area(obj, da, bytes, flags);
                goto err_alloc_iovma;
        if (map_iovm_area(obj, new, sgt, new->flags))
        mutex_unlock(&obj->mmap_lock);
        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);
        return new->da_start;
        free_iovm_area(obj, new);
        mutex_unlock(&obj->mmap_lock);
static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
                const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
        if (!obj || !obj->dev || !sgt)
        bytes = sgtable_len(sgt);
        bytes = PAGE_ALIGN(bytes);
        if (flags & IOVMF_MMIO) {
        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
EXPORT_SYMBOL_GPL(iommu_vmap);
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
        struct sg_table *sgt;
         * 'sgt' was allocated by the caller before 'iommu_vmap()' was called.
         * Just return 'sgt' to the caller to free.
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
EXPORT_SYMBOL_GPL(iommu_vunmap);
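/*
 * Illustrative sketch (not part of the original file): mapping a caller-built
 * sg_table with iommu_vmap() and tearing it down again.  'obj' and 'sgt' are
 * assumed to exist already; every sg element must be an iommu page size
 * (4KB/64KB/1MB/16MB), and da == 0 lets the allocator pick the device address.
 *
 *      u32 da = iommu_vmap(obj, 0, sgt, 0);
 *      if (IS_ERR_VALUE(da))
 *              return -ENOMEM;
 *      ...
 *      sgt = iommu_vunmap(obj, da);    (the same sgt is handed back so the
 *                                       caller can free it)
 */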
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 * Allocates @bytes linearly, creates a 1-n-1 mapping, and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
        struct sg_table *sgt;
        if (!obj || !obj->dev || !bytes)
        bytes = PAGE_ALIGN(bytes);
        sgt = sgtable_alloc(bytes, flags);
        sgtable_fill_vmalloc(sgt, va);
        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
        sgtable_drain_vmalloc(sgt);
EXPORT_SYMBOL_GPL(iommu_vmalloc);
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
void iommu_vfree(struct iommu *obj, const u32 da)
        struct sg_table *sgt;
        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
EXPORT_SYMBOL_GPL(iommu_vfree);
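/*
 * Illustrative sketch (not part of the original file): requesting a fixed
 * device address.  A nonzero 'da' selects IOVMF_DA_FIXED, so the area is
 * placed exactly at the requested address, or an error value is returned
 * if that range is already occupied.
 *
 *      u32 da = iommu_vmalloc(obj, 0x20000000, SZ_64K, 0);
 *      if (IS_ERR_VALUE(da))
 *              return -EINVAL;
 *      ...
 *      iommu_vfree(obj, da);
 */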
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                        size_t bytes, u32 flags)
        struct sg_table *sgt;
        sgt = sgtable_alloc(bytes, flags);
        sgtable_fill_kmalloc(sgt, pa, bytes);
        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @pa: contiguous physical memory
 * @bytes: bytes for mapping
 * @flags: iovma and page property
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
        if (!obj || !obj->dev || !bytes)
        bytes = PAGE_ALIGN(bytes);
        va = ioremap(pa, bytes);
        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
EXPORT_SYMBOL_GPL(iommu_kmap);
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and returned by 'iommu_kmap()'.
void iommu_kunmap(struct iommu *obj, u32 da)
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);
        sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
EXPORT_SYMBOL_GPL(iommu_kunmap);
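/*
 * Illustrative sketch (not part of the original file): exposing an already
 * physically contiguous region (for instance a device window or reserved
 * carveout at a hypothetical address MY_REGION_PA) to the iommu-attached
 * processor with iommu_kmap()/iommu_kunmap().
 *
 *      u32 da = iommu_kmap(obj, 0, MY_REGION_PA, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return -ENOMEM;
 *      ...
 *      iommu_kunmap(obj, da);  (also releases the mpu-side ioremap mapping)
 */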
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: bytes for allocation
 * @flags: iovma and page property
 * Allocates @bytes linearly, creates a 1-1-1 mapping, and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
        if (!obj || !obj->dev || !bytes)
        bytes = PAGE_ALIGN(bytes);
        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        pa = virt_to_phys(va);
        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
EXPORT_SYMBOL_GPL(iommu_kmalloc);
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and returned by 'iommu_kmalloc()'.
void iommu_kfree(struct iommu *obj, u32 da)
        struct sg_table *sgt;
        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
EXPORT_SYMBOL_GPL(iommu_kfree);
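/*
 * Illustrative sketch (not part of the original file): a small physically
 * contiguous buffer shared with the iommu-attached processor (pattern 2
 * above).  Because both 'pa' and 'da' sides are contiguous, the mapping can
 * use iommu superpages when the size allows it.
 *
 *      u32 da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *      if (IS_ERR_VALUE(da))
 *              return -ENOMEM;
 *      ...
 *      iommu_kfree(obj, da);   (unmaps and kfrees the underlying buffer)
 */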
static int __init iovmm_init(void)
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;
        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
        iovm_area_cachep = p;
module_init(iovmm_init);
static void __exit iovmm_exit(void)
        kmem_cache_destroy(iovm_area_cachep);
module_exit(iovmm_exit);
MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");