ENGR00215228-12: Move scatter/gather cache coherence into chained function.
author    Steve Cornelius <steve.cornelius@freescale.com>
          Thu, 28 Jun 2012 22:27:16 +0000 (15:27 -0700)
committer Lothar Waßmann <LW@KARO-electronics.de>
          Fri, 24 May 2013 06:34:54 +0000 (08:34 +0200)
Recent driver revisions began to incorporate optimized mapping functions
for scatter/gather list management, and then centralized them as inlinable
functions usable from multiple modules. Since these have become more broadly
useful, move the coupled cache-coherence operations out of the mainline code
and into the inlined helpers to simplify the callers.

Signed-off-by: Steve Cornelius <steve.cornelius@freescale.com>
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/sg_sw_sec4.h
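
For orientation, the sg_sw_sec4.h helpers end up roughly as below after this
change. This is a simplified sketch reconstructed from the hunks that follow:
the chained-list walk (one dma_map_sg()/dma_unmap_sg() call per entry via
sg_next()) is elided, and only the direction-dependent sync calls folded in
by this patch are shown.

static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
			      unsigned int nents, enum dma_data_direction dir,
			      bool chained)
{
	/* ... chained lists are mapped one entry at a time (elided) ... */
	dma_map_sg(dev, sg, nents, dir);

	/* Coherence: flush CPU-written buffers before the device reads them. */
	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		dma_sync_sg_for_device(dev, sg, nents, dir);

	return nents;
}

static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
				unsigned int nents, enum dma_data_direction dir,
				bool chained)
{
	/* Coherence: make device-written buffers visible to the CPU first. */
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/* ... chained lists are unmapped one entry at a time (elided) ... */
	dma_unmap_sg(dev, sg, nents, dir);
	return nents;
}

With the syncs handled inside the helpers, the callers in caamalg.c
(caam_unmap() and the edesc_alloc() routines) can drop their explicit
dma_sync_sg_for_device()/dma_sync_sg_for_cpu() calls, as the removed lines
in the hunks below show.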

index 50308b08508d183aab11c9ce1da57d6cedc85b9f..d26e25cddbb16a576652fb32e7e3c35675094f84 100644 (file)
@@ -727,12 +727,9 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
        if (dst != src) {
                dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
                                     src_chained);
-               dma_sync_sg_for_cpu(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
                dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
                                     dst_chained);
        } else {
-               dma_sync_sg_for_cpu(dev, src, src_nents ? : 1,
-                                   DMA_BIDIRECTIONAL);
                dma_unmap_sg_chained(dev, src, src_nents ? : 1,
                                     DMA_BIDIRECTIONAL, src_chained);
        }
@@ -1174,18 +1171,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                 DMA_BIDIRECTIONAL, assoc_chained);
-       dma_sync_sg_for_device(jrdev, req->assoc, sgc,
-                              DMA_BIDIRECTIONAL);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, sgc,
-                                      DMA_BIDIRECTIONAL);
        } else {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_TO_DEVICE, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, sgc,
-                                      DMA_TO_DEVICE);
                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
                                         DMA_FROM_DEVICE, dst_chained);
        }
@@ -1365,18 +1356,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                 DMA_BIDIRECTIONAL, assoc_chained);
-       dma_sync_sg_for_device(jrdev, req->assoc, assoc_nents ? : 1,
-                              DMA_BIDIRECTIONAL);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
-                                      DMA_BIDIRECTIONAL);
        } else {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_TO_DEVICE, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
-                                      DMA_TO_DEVICE);
                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
                                         DMA_FROM_DEVICE, dst_chained);
        }
@@ -1531,12 +1516,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, sgc,
-                                      DMA_BIDIRECTIONAL);
        } else {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_TO_DEVICE, src_chained);
-               dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_TO_DEVICE);
                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
                                         DMA_FROM_DEVICE, dst_chained);
        }
index 53499a2d02de73cc7ef255e699dadf635fc24e57..b2286ecce87b30c344fc9f60a6274e0d446e8aaa 100644 (file)
@@ -100,6 +100,10 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
        } else {
                dma_map_sg(dev, sg, nents, dir);
        }
+
+       if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+               dma_sync_sg_for_device(dev, sg, nents, dir);
+
        return nents;
 }
 
@@ -107,6 +111,9 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
                                unsigned int nents, enum dma_data_direction dir,
                                bool chained)
 {
+       if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+               dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
        if (unlikely(chained)) {
                int i;
                for (i = 0; i < nents; i++) {