powerpc: Use the newly added get_required_mask dma_map_ops hook
author    Milton Miller <miltonm@bga.com>
          Fri, 24 Jun 2011 09:05:24 +0000 (09:05 +0000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
          Mon, 19 Sep 2011 23:19:35 +0000 (09:19 +1000)
Now that the generic code has dma_map_ops set, push the computation
into the dma ops instead of keeping a messy ifdef & if block in the
base dma_get_required_mask hook.

If the ops does not set the get_required_mask hook, default to the
width of dma_addr_t.
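
For reference, a minimal userspace sketch (not part of the patch) of
what that fallback evaluates to, reusing the kernel's DMA_BIT_MASK()
definition; the 32-bit dma_addr_t typedef here is only an assumption
for illustration:

    /* Sketch only: print the fallback mask for a given dma_addr_t width. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;        /* assume a 32-bit dma_addr_t */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            /* mirrors: return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); */
            printf("%#llx\n", DMA_BIT_MASK(8 * sizeof(dma_addr_t)));
            return 0;   /* prints 0xffffffff here; ~0ULL with a 64-bit type */
    }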

This also corrects ibmebus's ibmebus_dma_supported to require a
64-bit mask; I doubt anything is checking or setting the dma mask on
that bus.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/device.h
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/vio.c
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/ps3/system-bus.c
arch/powerpc/platforms/pseries/iommu.c

diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 16d25c0974be9d7a4afa7c60f9b11942735caa16..d57c08acedfc2e5f1167a1f3a861094662c486fb 100644
@@ -37,4 +37,6 @@ struct pdev_archdata {
        u64 dma_mask;
 };
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 #endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 8135e66a4bb96bb5768968b35f5543e1a3f11a83..dd70fac57ec896253990fc1761fd3ad96fa6098a 100644
@@ -20,8 +20,6 @@
 
 #define DMA_ERROR_CODE         (~(dma_addr_t)0x0)
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag);
@@ -71,7 +69,6 @@ static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
-extern u64 dma_iommu_get_required_mask(struct device *dev);
 #endif
 extern struct dma_map_ops dma_direct_ops;
 
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 1f2a711a261e02dddf7c5bd5c62fbe3f022e1266..c1ad9db934f69973692901883066695a521c016d 100644
@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
                return 1;
 }
 
-u64 dma_iommu_get_required_mask(struct device *dev)
+static u64 dma_iommu_get_required_mask(struct device *dev)
 {
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
        .dma_supported  = dma_iommu_dma_supported,
        .map_page       = dma_iommu_map_page,
        .unmap_page     = dma_iommu_unmap_page,
+       .get_required_mask      = dma_iommu_get_required_mask,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4295e0b94b2db31238f008c49756955fd4922702..1ebc9189aada9ff21d9268b71e486589010a20a8 100644
@@ -24,6 +24,21 @@
 
 unsigned int ppc_swiotlb_enable;
 
+static u64 swiotlb_powerpc_get_required(struct device *dev)
+{
+       u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+       end = memblock_end_of_DRAM();
+       if (max_direct_dma_addr && end > max_direct_dma_addr)
+               end = max_direct_dma_addr;
+       end += get_dma_offset(dev);
+
+       mask = 1ULL << (fls64(end) - 1);
+       mask += mask - 1;
+
+       return mask;
+}
+
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .mapping_error = swiotlb_dma_mapping_error,
+       .get_required_mask = swiotlb_powerpc_get_required,
 };
 
 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
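
The rounding idiom in swiotlb_powerpc_get_required() above (and in
dma_direct_get_required_mask() below) turns the highest DMA-visible
address into the narrowest all-ones mask covering it. A standalone
worked example; this fls64() is a portable stand-in for the kernel
helper of the same name:

    #include <stdint.h>
    #include <stdio.h>

    /* 1-based index of the highest set bit, 0 if x == 0 */
    static int fls64(uint64_t x)
    {
            int bit = 0;

            while (x) {
                    bit++;
                    x >>= 1;
            }
            return bit;
    }

    int main(void)
    {
            uint64_t end = 0x120000000ULL;  /* e.g. RAM ending at 4.5 GiB */
            uint64_t mask = 1ULL << (fls64(end) - 1);

            mask += mask - 1;               /* 0x1ffffffff: a 33-bit mask */
            printf("%#llx\n", (unsigned long long)mask);
            return 0;
    }
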
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 503093efa202bfc6e29ab2761cd90bd943fef2dc..10b136afbf5052e5adbc1567f82bbbc02bba49b8 100644
@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+       u64 end, mask;
+
+       end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+       mask = 1ULL << (fls64(end) - 1);
+       mask += mask - 1;
+
+       return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
+       .get_required_mask      = dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
-       u64 mask, end = 0;
 
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
        if (unlikely(dma_ops == NULL))
                return 0;
 
-#ifdef CONFIG_PPC64
-       else if (dma_ops == &dma_iommu_ops)
-               return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-       else if (dma_ops == &swiotlb_dma_ops) {
-               u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-               end = memblock_end_of_DRAM();
-               if (max_direct_dma_addr && end > max_direct_dma_addr)
-                       end = max_direct_dma_addr;
-               end += get_dma_offset(dev);
-       }
-#endif
-       else if (dma_ops == &dma_direct_ops)
-               end = memblock_end_of_DRAM() + get_dma_offset(dev);
-       else {
-               WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-               end = memblock_end_of_DRAM();
-       }
+       if (dma_ops->get_required_mask)
+               return dma_ops->get_required_mask(dev);
 
-       mask = 1ULL << (fls64(end) - 1);
-       mask += mask - 1;
-
-       return mask;
+       return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
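
For context, the usual consumer of this interface is a driver probing
whether enabling 64-bit DMA is worthwhile. A sketch of that pattern
(example_setup_dma is a hypothetical name; the dma_*/pci_* calls are
the existing API):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_setup_dma(struct pci_dev *pdev)
    {
            /* Ask how wide a mask this device needs to reach all of
             * RAM; only pay for 64-bit DMA when 32 bits fall short. */
            if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
                !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
                    return 0;               /* using 64-bit DMA */

            return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    }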
 
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 28581f1ad2c08a8494502e2335cfeef2a295577e..90ef2a44613ba5403230458e42e335d455b32e10 100644
@@ -125,7 +125,12 @@ static void ibmebus_unmap_sg(struct device *dev,
 
 static int ibmebus_dma_supported(struct device *dev, u64 mask)
 {
-       return 1;
+       return mask == DMA_BIT_MASK(64);
+}
+
+static u64 ibmebus_dma_get_required_mask(struct device *dev)
+{
+       return DMA_BIT_MASK(64);
 }
 
 static struct dma_map_ops ibmebus_dma_ops = {
@@ -134,6 +139,7 @@ static struct dma_map_ops ibmebus_dma_ops = {
        .map_sg         = ibmebus_map_sg,
        .unmap_sg       = ibmebus_unmap_sg,
        .dma_supported  = ibmebus_dma_supported,
+       .get_required_mask  = ibmebus_dma_get_required_mask,
        .map_page       = ibmebus_map_page,
        .unmap_page     = ibmebus_unmap_page,
 };
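
With this change dma_set_mask() on an ibmebus device can only succeed
for a full 64-bit mask, because the generic path consults the ops'
dma_supported hook before accepting a mask. A simplified outline of
that interaction (not the verbatim kernel implementation):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int dma_set_mask_sketch(struct device *dev, u64 mask)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            if (ops->dma_supported && !ops->dma_supported(dev, mask))
                    return -EIO;    /* ibmebus rejects anything < 64 bit */

            *dev->dma_mask = mask;
            return 0;
    }
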
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 1b695fdc362b8b10f0551072e74b949dd04d4761..c0493259d1332f579f318ac3f6f9481a366f0bad 100644
@@ -605,6 +605,11 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
         return dma_iommu_ops.dma_supported(dev, mask);
 }
 
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+        return dma_iommu_ops.get_required_mask(dev);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
        .alloc_coherent = vio_dma_iommu_alloc_coherent,
        .free_coherent  = vio_dma_iommu_free_coherent,
@@ -613,7 +618,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
        .map_page       = vio_dma_iommu_map_page,
        .unmap_page     = vio_dma_iommu_unmap_page,
        .dma_supported  = vio_dma_iommu_dma_supported,
-
+       .get_required_mask = vio_dma_get_required_mask,
 };
 
 /**
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 5ef55f3b0987f54f78445dbc8cc1fc34f51f1c08..fc46fcac392199e6ad5fd8c1862b28ae2cb8e1b3 100644
@@ -1161,11 +1161,20 @@ __setup("iommu_fixed=", setup_iommu_fixed);
 
 static u64 cell_dma_get_required_mask(struct device *dev)
 {
+       struct dma_map_ops *dma_ops;
+
        if (!dev->dma_mask)
                return 0;
 
-       if (iommu_fixed_disabled && get_dma_ops(dev) == &dma_iommu_ops)
-               return dma_iommu_get_required_mask(dev);
+       if (!iommu_fixed_disabled &&
+                       cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+               return DMA_BIT_MASK(64);
+
+       dma_ops = get_dma_ops(dev);
+       if (dma_ops->get_required_mask)
+               return dma_ops->get_required_mask(dev);
+
+       WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
 
        return DMA_BIT_MASK(64);
 }
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 23083c397528408e182b2e2cd8be6d5078aafdab..688141c76e03d6f616e4c76d0e7de3c275dbe86f 100644
@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
        return mask >= DMA_BIT_MASK(32);
 }
 
+static u64 ps3_dma_get_required_mask(struct device *_dev)
+{
+       return DMA_BIT_MASK(32);
+}
+
 static struct dma_map_ops ps3_sb_dma_ops = {
        .alloc_coherent = ps3_alloc_coherent,
        .free_coherent = ps3_free_coherent,
        .map_sg = ps3_sb_map_sg,
        .unmap_sg = ps3_sb_unmap_sg,
        .dma_supported = ps3_dma_supported,
+       .get_required_mask = ps3_dma_get_required_mask,
        .map_page = ps3_sb_map_page,
        .unmap_page = ps3_unmap_page,
 };
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
        .map_sg = ps3_ioc0_map_sg,
        .unmap_sg = ps3_ioc0_unmap_sg,
        .dma_supported = ps3_dma_supported,
+       .get_required_mask = ps3_dma_get_required_mask,
        .map_page = ps3_ioc0_map_page,
        .unmap_page = ps3_unmap_page,
 };
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fe5ededf0d60e456a6ef3e50290fe76786ecb146..9f121a37eb516f2529d5d21386d4e5e41562b711 100644
@@ -1099,7 +1099,7 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
                        return DMA_BIT_MASK(64);
        }
 
-       return dma_iommu_get_required_mask(dev);
+       return dma_iommu_ops.get_required_mask(dev);
 }
 
 #else  /* CONFIG_PCI */