Merge tag 'iommu-updates-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 9a667a1ac6a6ac95645028fafe6cb6f7ce897179..3216e098c05877178705cdcd659a375e0afec0fd 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -309,24 +309,15 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
                                       sg->length, dir);
 }
 
-static int __swiotlb_mmap(struct device *dev,
-                         struct vm_area_struct *vma,
-                         void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                         unsigned long attrs)
+static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
+                             unsigned long pfn, size_t size)
 {
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
 
-       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-                                            is_device_dma_coherent(dev));
-
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
@@ -337,19 +328,43 @@ static int __swiotlb_mmap(struct device *dev,
        return ret;
 }
 
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-                                void *cpu_addr, dma_addr_t handle, size_t size,
-                                unsigned long attrs)
+static int __swiotlb_mmap(struct device *dev,
+                         struct vm_area_struct *vma,
+                         void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                         unsigned long attrs)
+{
+       int ret;
+       unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+                                            is_device_dma_coherent(dev));
+
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       return __swiotlb_mmap_pfn(vma, pfn, size);
+}
+
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+                                     struct page *page, size_t size)
 {
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
        if (!ret)
-               sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
-                           PAGE_ALIGN(size), 0);
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 
        return ret;
 }
 
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                void *cpu_addr, dma_addr_t handle, size_t size,
+                                unsigned long attrs)
+{
+       struct page *page = phys_to_page(dma_to_phys(dev, handle));
+
+       return __swiotlb_get_sgtable_page(sgt, page, size);
+}
+
 static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
        if (swiotlb)
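
These two hunks factor the swiotlb mmap and get_sgtable implementations into pfn/page-based helpers, __swiotlb_mmap_pfn() and __swiotlb_get_sgtable_page(), with thin device-aware wrappers on top; the IOMMU ops further down reuse the helpers for DMA_ATTR_FORCE_CONTIGUOUS buffers. As context, a minimal sketch of how a driver reaches these ops through the generic DMA API (my_dev, my_mmap and the buffer fields are hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    struct my_dev {
            struct device *dev;
            void *cpu_addr;         /* returned by dma_alloc_attrs() */
            dma_addr_t dma_handle;
            size_t size;
    };

    /* .mmap file operation: dma_mmap_attrs() dispatches through the
     * device's dma_map_ops, ending up in __swiotlb_mmap() here (or in
     * __iommu_mmap_attrs() when the device sits behind an IOMMU). */
    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_dev *md = file->private_data;

            return dma_mmap_attrs(md->dev, vma, md->cpu_addr,
                                  md->dma_handle, md->size, 0);
    }
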
@@ -585,20 +600,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
         */
        gfp |= __GFP_ZERO;
 
-       if (gfpflags_allow_blocking(gfp)) {
-               struct page **pages;
-               pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-
-               pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-                                       handle, flush_page);
-               if (!pages)
-                       return NULL;
-
-               addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-                                             __builtin_return_address(0));
-               if (!addr)
-                       iommu_dma_free(dev, pages, iosize, handle);
-       } else {
+       if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
@@ -622,6 +624,45 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
+       } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+               pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+               struct page *page;
+
+               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+                                                get_order(size), gfp);
+               if (!page)
+                       return NULL;
+
+               *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               if (iommu_dma_mapping_error(dev, *handle)) {
+                       dma_release_from_contiguous(dev, page,
+                                                   size >> PAGE_SHIFT);
+                       return NULL;
+               }
+               if (!coherent)
+                       __dma_flush_area(page_to_virt(page), iosize);
+
+               addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+                                                  prot,
+                                                  __builtin_return_address(0));
+               if (!addr) {
+                       iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+                       dma_release_from_contiguous(dev, page,
+                                                   size >> PAGE_SHIFT);
+               }
+       } else {
+               pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+               struct page **pages;
+
+               pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+                                       handle, flush_page);
+               if (!pages)
+                       return NULL;
+
+               addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+                                             __builtin_return_address(0));
+               if (!addr)
+                       iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
 }
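
The new DMA_ATTR_FORCE_CONTIGUOUS branch grabs the whole buffer from CMA with dma_alloc_from_contiguous(), maps it through the IOMMU as a single block, and remaps it into vmalloc space; the final else keeps the old iommu_dma_alloc() behaviour of assembling possibly discontiguous pages. A hedged usage sketch (alloc_contig_buf is hypothetical), for a driver that must hand a physically contiguous buffer to hardware that bypasses the IOMMU:

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static void *alloc_contig_buf(struct device *dev, dma_addr_t *handle)
    {
            /* DMA_ATTR_FORCE_CONTIGUOUS selects the CMA branch above;
             * the buffer is contiguous in physical memory as well as
             * in the IOMMU's IOVA space. */
            return dma_alloc_attrs(dev, SZ_1M, handle, GFP_KERNEL,
                                   DMA_ATTR_FORCE_CONTIGUOUS);
    }
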
@@ -633,7 +674,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 
        size = PAGE_ALIGN(size);
        /*
-        * @cpu_addr will be one of 3 things depending on how it was allocated:
+        * @cpu_addr will be one of 4 things depending on how it was allocated:
+        * - A remapped contiguous region, for allocations made with
+        *   DMA_ATTR_FORCE_CONTIGUOUS.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
@@ -645,6 +687,12 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
+       } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+               struct page *page = vmalloc_to_page(cpu_addr);
+
+               iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+               dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+               dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);
 
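
Freeing mirrors allocation: the same attrs must be passed back so the new branch runs (releasing the pages to CMA and tearing down the vmalloc alias) rather than the generic is_vmalloc_addr() one. Continuing the hypothetical sketch from above:

    static void free_contig_buf(struct device *dev, void *cpu_addr,
                                dma_addr_t handle)
    {
            /* attrs here must match those used at allocation time. */
            dma_free_attrs(dev, SZ_1M, cpu_addr, handle,
                           DMA_ATTR_FORCE_CONTIGUOUS);
    }
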
@@ -671,6 +719,15 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
+       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+               /*
+                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+                * hence in the vmalloc space.
+                */
+               unsigned long pfn = vmalloc_to_pfn(cpu_addr);
+               return __swiotlb_mmap_pfn(vma, pfn, size);
+       }
+
        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;
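
The invariant stated in the comment is what makes reusing __swiotlb_mmap_pfn() valid: dma_common_contiguous_remap() puts the kernel alias in vmalloc space, and since the backing pages are physically contiguous, the first pfn describes the entire buffer. An illustrative sanity check of that relationship (check_contig_alias is not part of the patch):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void check_contig_alias(struct page *page, void *cpu_addr)
    {
            /* The remapped alias lives in vmalloc space... */
            WARN_ON(!is_vmalloc_addr(cpu_addr));
            /* ...and resolves back to the first contiguous page. */
            WARN_ON(vmalloc_to_pfn(cpu_addr) != page_to_pfn(page));
    }
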
@@ -685,6 +742,15 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);
 
+       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+               /*
+                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+                * hence in the vmalloc space.
+                */
+               struct page *page = vmalloc_to_page(cpu_addr);
+               return __swiotlb_get_sgtable_page(sgt, page, size);
+       }
+
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;
 
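
Scatterlist export follows the same pattern: a physically contiguous buffer collapses to a single-entry sg_table via __swiotlb_get_sgtable_page(). A sketch of a caller such as a dma-buf exporter (export_sgt is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int export_sgt(struct device *dev, struct sg_table *sgt,
                          void *cpu_addr, dma_addr_t handle, size_t size)
    {
            /* attrs must again carry DMA_ATTR_FORCE_CONTIGUOUS so the
             * branch above is taken rather than the area->pages path. */
            return dma_get_sgtable_attrs(dev, sgt, cpu_addr, handle,
                                         size, DMA_ATTR_FORCE_CONTIGUOUS);
    }
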
@@ -872,4 +938,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+
+#ifdef CONFIG_XEN
+       if (xen_initial_domain()) {
+               dev->archdata.dev_dma_ops = dev->dma_ops;
+               dev->dma_ops = xen_dma_ops;
+       }
+#endif
 }
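
When running as the Xen initial domain, DMA involving foreign-owned pages has to go through swiotlb-xen, so the freshly chosen ops are stashed in archdata.dev_dma_ops and replaced wholesale with xen_dma_ops. The stashed pointer lets the Xen ops fall back to the native ones, roughly in this pattern (my_xen_get_dma_ops is illustrative; the real helper lives in the Xen arch headers):

    /* Only meaningful when built with CONFIG_XEN, which provides the
     * dev_dma_ops field in struct dev_archdata. */
    static inline const struct dma_map_ops *
    my_xen_get_dma_ops(struct device *dev)
    {
            /* Native ops saved by arch_setup_dma_ops() above, if any. */
            if (dev->archdata.dev_dma_ops)
                    return dev->archdata.dev_dma_ops;
            return get_arch_dma_ops(NULL);
    }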