author    Steve Cornelius <steve.cornelius@freescale.com>  2012-06-28 15:27:16 -0700
committer Jason Liu <r64343@freescale.com>                 2012-07-20 13:39:17 +0800
commit    274efb68574acac15e1db22a3cdfe158892f8f66 (patch)
tree      948658131503b74a56cd0d076897ce0adc1ff0c5 /drivers/crypto
parent    1a92202042054b8ef148921c7c9a85f0f503cb64 (diff)
ENGR00215228-12: Move scatter/gather cache coherence into chained function.
Recent driver revisions began to incorporate optimized mapping functions for scatter/gather list management, and then centralized them as inlinable functions usable from multiple modules. Since these became more broadly useful, move the coupled cache-coherence calls out of the mainline code and into the inlined helpers for simplification.

Signed-off-by: Steve Cornelius <steve.cornelius@freescale.com>
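For reference, a minimal sketch of the two consolidated helpers as they read once this patch is applied, reconstructed from the sg_sw_sec4.h hunks below. The sync placement and direction checks come straight from the patch; the chained-list walk (the per-entry loop, the local cursor and the use of sg_next()) is an assumed simplification, not the verbatim driver source:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
			      unsigned int nents, enum dma_data_direction dir,
			      bool chained)
{
	if (unlikely(chained)) {
		/* chained list: map each entry individually (assumed walk) */
		struct scatterlist *s = sg;
		int i;

		for (i = 0; i < nents; i++) {
			dma_map_sg(dev, s, 1, dir);
			s = sg_next(s);
		}
	} else {
		dma_map_sg(dev, sg, nents, dir);
	}

	/* cache coherence now lives in the helper instead of in every caller */
	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		dma_sync_sg_for_device(dev, sg, nents, dir);

	return nents;
}

static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
				unsigned int nents, enum dma_data_direction dir,
				bool chained)
{
	/* make device writes visible to the CPU before the mapping goes away */
	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
		dma_sync_sg_for_cpu(dev, sg, nents, dir);

	if (unlikely(chained)) {
		/* chained list: unmap each entry individually (assumed walk) */
		struct scatterlist *s = sg;
		int i;

		for (i = 0; i < nents; i++) {
			dma_unmap_sg(dev, s, 1, dir);
			s = sg_next(s);
		}
	} else {
		dma_unmap_sg(dev, sg, nents, dir);
	}

	return nents;
}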
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/caamalg.c     | 18
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h  |  7
2 files changed, 7 insertions(+), 18 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 50308b08508d..d26e25cddbb1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -727,12 +727,9 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
src_chained);
- dma_sync_sg_for_cpu(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
dst_chained);
} else {
- dma_sync_sg_for_cpu(dev, src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
dma_unmap_sg_chained(dev, src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
}
@@ -1174,18 +1171,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_BIDIRECTIONAL, assoc_chained);
- dma_sync_sg_for_device(jrdev, req->assoc, sgc,
- DMA_BIDIRECTIONAL);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, sgc,
- DMA_BIDIRECTIONAL);
} else {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, sgc,
- DMA_TO_DEVICE);
sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE, dst_chained);
}
@@ -1365,18 +1356,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_BIDIRECTIONAL, assoc_chained);
- dma_sync_sg_for_device(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
} else {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE, dst_chained);
}
@@ -1531,12 +1516,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, sgc,
- DMA_BIDIRECTIONAL);
} else {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, src_chained);
- dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_TO_DEVICE);
sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE, dst_chained);
}
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 53499a2d02de..b2286ecce87b 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -100,6 +100,10 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
} else {
dma_map_sg(dev, sg, nents, dir);
}
+
+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_device(dev, sg, nents, dir);
+
return nents;
}
@@ -107,6 +111,9 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
bool chained)
{
+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {
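With the syncs folded into the helpers, the call sites reduce to plain map/unmap pairs. A hedged caller-side sketch, simplified from the caam_unmap() hunk above (the real function takes additional arguments, e.g. for the IV and sec4 S/G table DMA handles, which are omitted here):

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       bool src_chained, bool dst_chained)
{
	if (dst != src) {
		/* dma_unmap_sg_chained() now issues dma_sync_sg_for_cpu() itself */
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}
}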