common: dma-mapping: add support for generic dma_mmap_* calls
author     Marek Szyprowski <m.szyprowski@samsung.com>
           Thu, 14 Jun 2012 11:03:04 +0000 (13:03 +0200)
committer  Marek Szyprowski <m.szyprowski@samsung.com>
           Mon, 23 Jul 2012 16:21:03 +0000 (18:21 +0200)
Commit 9adc5374 ('common: dma-mapping: introduce mmap method') added a
generic method for implementing the mmap user call to the dma_map_ops
structure.
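
For reference, the mmap callback introduced by that commit sits alongside
alloc/free in struct dma_map_ops; an abridged sketch of the relevant
members (all other callbacks omitted), matching the signature used by the
generic dma_mmap_attrs() added below, looks roughly like this:

	struct dma_map_ops {
		void *(*alloc)(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       struct dma_attrs *attrs);
		void (*free)(struct device *dev, size_t size,
			     void *vaddr, dma_addr_t dma_handle,
			     struct dma_attrs *attrs);
		/* map a coherent allocation into a user vma; the generic
		 * dma_mmap_attrs() dispatches here when it is set */
		int (*mmap)(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size,
			    struct dma_attrs *attrs);
		/* ... map_page/map_sg/sync/etc. callbacks not shown ... */
	};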

This patch converts the ARM and PowerPC architectures (the only providers
of the dma_mmap_coherent/dma_mmap_writecombine calls) to this generic
dma_map_ops-based call and adds a generic, cross-architecture definition
of the dma_mmap_attrs, dma_mmap_coherent and dma_mmap_writecombine
functions.
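
As a usage illustration only (not part of this patch), a driver that has
allocated a coherent buffer with dma_alloc_coherent() can now export it to
user space from its mmap file operation in the same way on any
architecture; foo_dev, foo_mmap and the vaddr/dma_handle/size fields below
are hypothetical names:

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_dev *foo = file->private_data;

		/* foo->vaddr and foo->dma_handle come from an earlier
		 * dma_alloc_coherent(); the buffer must not be freed until
		 * the user space mapping has been released */
		return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
					 foo->dma_handle, foo->size);
	}

dma_mmap_writecombine() is called the same way when a write-combined
user mapping is wanted instead.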

A generic, virt_to_page()-based fallback implementation of the mmap method
is provided for architectures that do not supply their own.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
arch/arm/include/asm/dma-mapping.h
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/vio.c
drivers/base/dma-mapping.c
include/asm-generic/dma-coherent.h
include/asm-generic/dma-mapping-common.h

index bbef15d04890b7c1ef4a9d4afec77867de1fc72d..86450880dd6546c6a7b7c91fbfbef41588d74e51 100644 (file)
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs);
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-                                 void *cpu_addr, dma_addr_t dma_addr,
-                                 size_t size, struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       BUG_ON(!ops);
-       return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -213,14 +202,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       DEFINE_DMA_ATTRS(attrs);
-       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above its default value of 2MB. It must be called before the
index 62678e365ca0768e0566329eb957f0058a04897d..78160874809a1e1e5faf8b049baec80a7bd24545 100644 (file)
@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs);
-
+extern int dma_direct_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma,
+                                   void *cpu_addr, dma_addr_t handle,
+                                   size_t size, struct dma_attrs *attrs);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-                            void *, dma_addr_t, size_t);
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 {
index bcfdcd22c766f43ebb29dbe495641892215b1cda..2d7bb8ced136c1e618d1e25229553a70512dc927 100644 (file)
@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
+       .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
index 4ab88dafb235c8b29955bf1a70a3e04aa255db5a..46943651da23ba25b3e1093ea346fb31e154243c 100644 (file)
@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 struct dma_map_ops swiotlb_dma_ops = {
        .alloc = dma_direct_alloc_coherent,
        .free = dma_direct_free_coherent,
+       .mmap = dma_direct_mmap_coherent,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .dma_supported = swiotlb_dma_supported,
index b1ec983dcec8954ada2bf1cd9937c9d52706b333..062bf20e6dd4154cbe1b4ae62198926931dc0853 100644 (file)
@@ -65,6 +65,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                            void *cpu_addr, dma_addr_t handle, size_t size,
+                            struct dma_attrs *attrs)
+{
+       unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+       pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+       return remap_pfn_range(vma, vma->vm_start,
+                              pfn + vma->vm_pgoff,
+                              vma->vm_end - vma->vm_start,
+                              vma->vm_page_prot);
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
@@ -154,6 +172,7 @@ static inline void dma_direct_sync_single(struct device *dev,
 struct dma_map_ops dma_direct_ops = {
        .alloc                          = dma_direct_alloc_coherent,
        .free                           = dma_direct_free_coherent,
+       .mmap                           = dma_direct_mmap_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
@@ -211,20 +230,3 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-                     void *cpu_addr, dma_addr_t handle, size_t size)
-{
-       unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
-       pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
-       return remap_pfn_range(vma, vma->vm_start,
-                              pfn + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
index cb87301ccd55a63a46f3069efe3b860e3da5c3c8..dda3d9ad109432852287c2f125218ad1c9eebe45 100644 (file)
@@ -613,6 +613,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 struct dma_map_ops vio_dma_mapping_ops = {
        .alloc             = vio_dma_iommu_alloc_coherent,
        .free              = vio_dma_iommu_free_coherent,
+       .mmap              = dma_direct_mmap_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,
index 6f3676f1559f173b7767a2bdda577df2292ef461..db5db02e885f63e4d4eab67f84ef15759abfa09a 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API
@@ -218,3 +219,33 @@ void dmam_release_declared_memory(struct device *dev)
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
 #endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+#ifdef CONFIG_MMU
+       unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+       unsigned long off = vma->vm_pgoff;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < count && user_count <= (count - off)) {
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     pfn + off,
+                                     user_count << PAGE_SHIFT,
+                                     vma->vm_page_prot);
+       }
+#endif /* CONFIG_MMU */
+
+       return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
index abfb2682de7f33b0dce686447849e9c49b85c0a5..2be8a2dbc868f86f84e90c0e79a755c1f51e672b 100644 (file)
@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
 #endif
 
 #endif
index 2e248d8924dc34c38fa0ed91fdaa619060c9a603..9073aeb3bb1a9208752afd311a476debf741b124 100644 (file)
@@ -176,4 +176,41 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+              dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       BUG_ON(!ops);
+       if (ops->mmap)
+               return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+       return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
 #endif