/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>
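
/*
 * Set by arm64_dma_init() when bounce buffering may be needed, i.e. when
 * swiotlb is forced on the command line or when RAM extends beyond
 * arm64_dma_phys_limit; checked by the dma_supported/mapping_error hooks
 * below.
 */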
static int swiotlb __ro_after_init;
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
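
/*
 * Allocate the backing memory: from CMA when a contiguous area is available
 * and the caller may block, otherwise fall back to swiotlb's coherent
 * allocator.
 */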
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	}

	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
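
/*
 * Top-level coherent allocation: coherent devices can use the kernel
 * (cacheable) linear alias directly; non-coherent devices either take a
 * pre-mapped buffer from the atomic pool (when blocking is not allowed)
 * or have the allocation remapped with non-cacheable attributes.
 */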
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
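
/*
 * The __swiotlb_* streaming ops below wrap the generic swiotlb calls and
 * add the CPU cache maintenance required for non-coherent devices,
 * honouring DMA_ATTR_SKIP_CPU_SYNC where it applies.
 */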
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}
static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};
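
/*
 * Carve out a small region at boot, remapped non-cacheable, to back
 * __alloc_from_pool(): atomic (non-blocking) allocations for non-coherent
 * devices cannot remap pages at allocation time, so they must come from
 * this pre-mapped pool.
 */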
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);
		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
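
/*
 * Every operation here either fails or does nothing; these ops are meant
 * for devices that must not perform DMA, so any attempt is caught
 * immediately instead of silently corrupting memory.
 */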
static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}
const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
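
/*
 * Record at arch_initcall time whether swiotlb bouncing may ever be needed,
 * then set up the atomic pool used by the allocators above.
 */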
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}
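
/*
 * IOMMU-backed allocation takes one of three paths: a physically contiguous
 * buffer (from the atomic pool or alloc_pages()) when blocking is not
 * allowed, a CMA allocation for DMA_ATTR_FORCE_CONTIGUOUS, or otherwise a
 * scatter of pages from iommu_dma_alloc() remapped into a contiguous
 * kernel VA range.
 */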
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}
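
/*
 * For the streaming API the IOMMU ops reuse the sync helpers above: buffers
 * are made visible to the device after mapping and brought back to the CPU
 * before unmapping, unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */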
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};
static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);
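
/*
 * Install iommu_dma_ops for a device only if the IOMMU core has given it a
 * default DMA domain that the driver supports via the dma-iommu layer;
 * otherwise keep the platform (swiotlb) ops.
 */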
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}
#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif	/* CONFIG_IOMMU_DMA */
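
/*
 * Called by the bus/firmware code when a device is added: pick the swiotlb
 * ops by default, record coherency, and let the IOMMU path override the ops
 * if a suitable domain exists. Under a Xen initial domain the chosen ops are
 * wrapped again by xen_dma_ops.
 */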
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}