author    Victoria Milhoan (b42089) <vicki.milhoan@freescale.com>    2014-10-16 15:44:18 -0700
committer Nitin Garg <nitin.garg@freescale.com>    2015-04-14 14:01:41 -0500
commit    1c954ccc92fbc00e8eb53f01c34f8bf566f13edf (patch)
tree      c41f1deebe942f564be7105f53c844b65db1cd6c /drivers/crypto
parent    f90ff588e0f7ae41d6ca7ac033b2101ec859d37c (diff)
MLK-9769-4 Add CAAM driver cache coherency support
i.MX6 ARM platforms do not support hardware cache coherency. This patch
adds cache coherency support to the CAAM driver.

Signed-off-by: Victoria Milhoan <vicki.milhoan@freescale.com>
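[Editor's note] For readers unfamiliar with the streaming DMA API used
throughout this patch: on platforms without hardware-maintained coherency,
dirty CPU cache lines must be flushed before the device reads a buffer, and
stale lines must be invalidated before the CPU reads what the device wrote.
A minimal sketch of that round trip follows; it is illustrative only, not
code from this commit, and "dev", "buf" and "len" are hypothetical.

#include <linux/dma-mapping.h>

/*
 * Sketch of the sync pattern this commit applies (not from the commit
 * itself): hand a CPU-filled buffer to a DMA device, then read back
 * the device's result, with explicit cache maintenance at each side.
 */
static int example_dma_round_trip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* CPU filled buf: flush dirty cache lines so the device sees them */
	dma_sync_single_for_device(dev, handle, len, DMA_BIDIRECTIONAL);

	/* ... device DMA runs here ... */

	/* Device wrote buf: invalidate stale lines before the CPU reads */
	dma_sync_single_for_cpu(dev, handle, len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
	return 0;
}

In the diff below this pattern appears as dma_sync_single_for_device()
calls after the CPU writes shared descriptors, keys and scatter/gather
tables, and dma_sync_single_for_cpu() calls before the CPU reads results
produced by the CAAM.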
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/caamalg.c   |  41
-rw-r--r--  drivers/crypto/caam/caamhash.c  |  70
-rw-r--r--  drivers/crypto/caam/caamrng.c   |  10
-rw-r--r--  drivers/crypto/caam/jr.c        |  23
-rw-r--r--  drivers/crypto/caam/key_gen.c   |   8
5 files changed, 130 insertions(+), 22 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b71f2fd749df..905f95c64951 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for crypto API
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2015 Freescale Semiconductor, Inc.
*
* Based on talitos crypto API driver.
*
@@ -287,6 +287,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
@@ -354,6 +356,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
@@ -437,6 +441,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -505,11 +511,15 @@ static int aead_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len + keys.enckeylen, 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->split_key_pad_len + keys.enckeylen,
+ DMA_TO_DEVICE);
ctx->enckeylen = keys.enckeylen;
@@ -548,6 +558,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
ctx->enckeylen = keylen;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -590,6 +601,9 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
@@ -636,6 +650,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return ret;
}
@@ -1153,7 +1169,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1177,9 +1193,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_len += dst_nents;
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1220,6 +1237,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
return edesc;
}
@@ -1334,7 +1353,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
&dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1367,8 +1386,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1410,6 +1431,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
return edesc;
}
@@ -1501,6 +1524,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
* If so, include it. If not, create scatterlist.
*/
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
iv_contig = true;
else
@@ -1509,7 +1533,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1539,8 +1563,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
+
edesc->iv_dma = iv_dma;
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
@@ -2189,6 +2217,7 @@ static int __init caam_algapi_init(void)
} else
list_add_tail(&t_alg->entry, &alg_list);
}
+
if (!list_empty(&alg_list))
pr_info("caam algorithms registered in /proc/crypto\n");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index d97a03dbf42c..854ef523c60f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011-2015 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
@@ -166,6 +166,7 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
dma_addr_t buf_dma;
buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, buf_dma, buflen, DMA_TO_DEVICE);
dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
return buf_dma;
@@ -208,6 +209,9 @@ static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
u32 flag)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if ((flag == DMA_TO_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_device(jrdev, state->ctx_dma, ctx_len,
+ flag);
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
@@ -353,6 +357,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
"ahash update first shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
@@ -371,6 +377,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_finup shared descriptor */
desc = ctx->sh_desc_finup;
@@ -389,6 +397,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
@@ -409,6 +419,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -446,6 +458,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
kfree(desc);
return -ENOMEM;
}
+ dma_sync_single_for_device(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+
dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dst_dma)) {
@@ -490,6 +504,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
*keylen = digestsize;
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
kfree(desc);
@@ -547,6 +562,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
+
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -594,8 +613,11 @@ static inline void ahash_unmap(struct device *dev,
if (edesc->src_nents)
dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
DMA_TO_DEVICE, edesc->chained);
- if (edesc->dst_dma)
+ if (edesc->dst_dma) {
+ dma_sync_single_for_cpu(dev, edesc->dst_dma, dst_len,
+ DMA_FROM_DEVICE);
dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+ }
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -610,8 +632,12 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
- if (state->ctx_dma)
+ if (state->ctx_dma) {
+ if ((flag == DMA_FROM_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_cpu(dev, state->ctx_dma,
+ ctx->ctx_len, flag);
dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ }
ahash_unmap(dev, edesc, req, dst_len);
}
@@ -805,7 +831,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -855,6 +881,9 @@ static int ahash_update_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -909,7 +938,7 @@ static int ahash_final_ctx(struct ahash_request *req)
sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -941,6 +970,9 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -985,7 +1017,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1020,6 +1052,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1060,7 +1095,7 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1070,6 +1105,7 @@ static int ahash_digest(struct ahash_request *req)
DESC_JOB_IO_LEN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
edesc->chained = chained;
@@ -1087,6 +1123,9 @@ static int ahash_digest(struct ahash_request *req)
}
append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
@@ -1125,7 +1164,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1144,6 +1183,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize);
edesc->src_nents = 0;
+ dma_sync_single_for_device(jrdev, state->buf_dma, buflen,
+ DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1196,7 +1237,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1233,6 +1274,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -1295,7 +1338,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1327,6 +1370,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1381,7 +1427,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1421,6 +1467,8 @@ static int ahash_update_first(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 28486b19fc36..aa151a7154df 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for hw_random
*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011-2015 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
@@ -80,9 +80,12 @@ static struct caam_rng_ctx rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
- if (bd->addr)
+ if (bd->addr) {
+ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
DMA_FROM_DEVICE);
+ }
}
static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
@@ -206,6 +209,9 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4cadd4c79691..e139c9dcbab8 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -168,6 +168,9 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ dma_addr_t outbusaddr;
+
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
while (rd_reg32(&jrp->rregs->outring_used)) {
@@ -177,6 +180,9 @@ static void caam_jr_dequeue(unsigned long devarg)
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
+ dma_sync_single_for_cpu(dev, outbusaddr,
+ sizeof(struct jr_outentry) * JOBR_DEPTH,
+ DMA_FROM_DEVICE);
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
@@ -204,6 +210,8 @@ static void caam_jr_dequeue(unsigned long devarg)
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
+ smp_mb();
+
/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
@@ -323,7 +331,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
int head, tail, desc_size;
- dma_addr_t desc_dma;
+ dma_addr_t desc_dma, inpbusaddr;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
@@ -332,6 +340,13 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
return -EIO;
}
+ dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
+
spin_lock_bh(&jrp->inplock);
head = jrp->head;
@@ -353,12 +368,18 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
+
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+ wmb();
+
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_bh(&jrp->inplock);
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index b872eed2957b..3d6a921c7d2c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -1,7 +1,7 @@
/*
* CAAM/SEC 4.x functions for handling key-generation jobs
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2015 Freescale Semiconductor, Inc.
*
*/
#include "compat.h"
@@ -74,6 +74,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
}
init_job_desc(desc, 0);
+
+ dma_sync_single_for_device(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
@@ -114,7 +117,8 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
split_key_pad_len, 1);
#endif
}
-
+ dma_sync_single_for_cpu(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
out_unmap_in: