author     Olof Johansson <olof@lixom.net>          2014-01-02 11:45:27 -0800
committer  Olof Johansson <olof@lixom.net>          2014-01-02 11:45:27 -0800
commit     a7dedb4fead89f88255862a9bc46f4c7ec094c2e (patch)
tree       d07b09522364b895dc91ccd2d057668d2fa692c5 /drivers/crypto
parent     9dc9b8e51f95fce0cc3d50ef80402203d2dcd68a (diff)
parent     6075a8b2b6c32ddcb99b85189ae41ab2903e560f (diff)
Merge tag 'davinci-for-v3.14/gpio' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci into next/drivers
From Sekhar Nori:

DaVinci GPIO driver updates
---------------------------

This pull request contains updates to the DaVinci GPIO driver and the
resulting platform code changes. The updates include DT conversion and
changes to make the driver cross-platform ready.

* tag 'davinci-for-v3.14/gpio' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci:
  gpio: davinci: don't create irq_domain in case of unbanked irqs
  gpio: davinci: use chained_irq_enter/chained_irq_exit API
  gpio: davinci: add OF support
  gpio: davinci: remove unused variable intc_irq_num
  gpio: davinci: convert to use irqdomain support.
  gpio: introduce GPIO_DAVINCI kconfig option
  gpio: davinci: get rid of DAVINCI_N_GPIO
  gpio: davinci: use {readl|writel}_relaxed() instead of __raw_*

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/caamalg.c  51
-rw-r--r--  drivers/crypto/caam/jr.c        1
-rw-r--r--  drivers/crypto/talitos.c       68
3 files changed, 69 insertions(+), 51 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71b9e24..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen, 1);
+ req->cryptlen - ctx->authsize, 1);
#endif
if (err) {
@@ -972,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
(edesc->src_nents ? : 1);
in_options = LDST_SGF;
}
- if (encrypt)
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
- else
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen, in_options);
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (likely(req->src == req->dst)) {
if (all_contig) {
@@ -998,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
}
}
if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+ out_options);
else
append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
out_options);
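Taken together with the removal of the req->cryptlen += ctx->authsize adjustment in aead_encrypt() further down, the hunks above make the length accounting uniform: the input sequence always spans assoclen + ivsize + cryptlen, and only the output length is adjusted by authsize: added on encrypt (the ICV is appended), subtracted on decrypt (req->cryptlen still counts the incoming ICV). A minimal standalone sketch of that bookkeeping, using hypothetical helper names that are not part of the driver:

#include <stdio.h>

/* Hypothetical model (not driver code) of the sequence lengths set up
 * above. On decrypt, cryptlen is assumed to include the authsize-byte
 * ICV, matching the convention the caam hunks rely on. */
static unsigned int seq_in_len(unsigned int assoclen, unsigned int ivsize,
			       unsigned int cryptlen)
{
	/* same for encrypt and decrypt after this patch */
	return assoclen + ivsize + cryptlen;
}

static unsigned int seq_out_len(unsigned int cryptlen, unsigned int authsize,
				int encrypt)
{
	/* ICV appended on encrypt, stripped on decrypt */
	return encrypt ? cryptlen + authsize : cryptlen - authsize;
}

int main(void)
{
	unsigned int assoclen = 16, ivsize = 16, authsize = 16;

	/* encrypt: 64 plaintext bytes in, 80 bytes (ciphertext + ICV) out */
	printf("encrypt: in=%u out=%u\n", seq_in_len(assoclen, ivsize, 64),
	       seq_out_len(64, authsize, 1));
	/* decrypt: 80 bytes (ciphertext + ICV) in, 64 plaintext bytes out */
	printf("decrypt: in=%u out=%u\n", seq_in_len(assoclen, ivsize, 80),
	       seq_out_len(80, authsize, 0));
	return 0;
}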
@@ -1048,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
- req->cryptlen - authsize, in_options);
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+ in_options);
if (contig & GIV_DST_CONTIG) {
dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
}
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+ out_options);
}
/*
@@ -1130,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- int desc_bytes, bool *all_contig_ptr)
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool assoc_chained = false, src_chained = false, dst_chained = false;
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
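The new encrypt flag lets aead_edesc_alloc() size the DMA mappings itself instead of relying on the caller to pre-adjust req->cryptlen: when dst != src the destination needs cryptlen + authsize bytes on encrypt and cryptlen - authsize on decrypt, while the in-place case only needs the extra authsize on encrypt. A small sketch of that length selection, under the same assumption that cryptlen includes the ICV on decrypt (hypothetical function, not driver code):

#include <stdio.h>

/* Hypothetical model of the mapping lengths chosen above. On decrypt,
 * cryptlen still includes the authsize-byte ICV, so a separate
 * destination is shorter by authsize; on encrypt the ICV is appended,
 * so the destination (or the shared in-place buffer) is longer. */
static void aead_map_lens(int cryptlen, int authsize, int encrypt,
			  int in_place, int *src_len, int *dst_len)
{
	if (in_place) {
		*src_len = cryptlen + (encrypt ? authsize : 0);
		*dst_len = *src_len; /* same buffer */
	} else {
		*src_len = cryptlen;
		*dst_len = cryptlen + (encrypt ? authsize : -authsize);
	}
}

int main(void)
{
	int s, d;

	aead_map_lens(64, 16, 1, 0, &s, &d);
	printf("encrypt, dst != src: src=%d dst=%d\n", s, d); /* 64, 80 */
	aead_map_lens(80, 16, 0, 0, &s, &d);
	printf("decrypt, dst != src: src=%d dst=%d\n", s, d); /* 80, 64 */
	aead_map_lens(64, 16, 1, 1, &s, &d);
	printf("encrypt, in place:   src=%d dst=%d\n", s, d); /* 80, 80 */
	return 0;
}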
@@ -1234,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1275,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig);
+ CAAM_CMD_SZ, &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1332,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
u32 *desc;
int ret = 0;
- req->cryptlen += ctx->authsize;
-
/* allocate extended descriptor */
edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
CAAM_CMD_SZ, &contig);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d23356d20e1c..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
*/
#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include "compat.h"
#include "regs.h"
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 905de4427e7c..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -790,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
if (edesc->assoc_chained)
talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
- else
+ else if (areq->assoclen)
/* assoc_nents counts also for IV in non-contiguous cases */
dma_unmap_sg(dev, areq->assoc,
edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -973,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
- to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+ if (areq->assoclen)
+ to_talitos_ptr(&desc->ptr[1],
+ sg_dma_address(areq->assoc));
+ else
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
desc->ptr[1].j_extent = 0;
}
@@ -1108,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
unsigned int authsize,
unsigned int ivsize,
int icv_stashing,
- u32 cryptoflags)
+ u32 cryptoflags,
+ bool encrypt)
{
struct talitos_edesc *edesc;
int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1122,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-EINVAL);
}
- if (iv)
+ if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- if (assoc) {
+ if (assoclen) {
/*
* Currently it is assumed that iv is provided whenever assoc
* is.
@@ -1141,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
}
- src_nents = sg_count(src, cryptlen + authsize, &src_chained);
- src_nents = (src_nents == 1) ? 0 : src_nents;
-
- if (!dst) {
- dst_nents = 0;
- } else {
- if (dst == src) {
- dst_nents = src_nents;
- } else {
- dst_nents = sg_count(dst, cryptlen + authsize,
- &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
- }
+ if (!dst || dst == src) {
+ src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+ } else { /* dst && dst != src */
+ src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+ &src_chained);
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
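talitos uses the opposite convention: aead_decrypt() subtracts authsize from req->cryptlen before allocating (visible in a later hunk), so by this point cryptlen never includes the ICV, and in the dst != src case the extra authsize bytes are attributed to whichever side actually carries the ICV. A standalone sketch under that assumption, with hypothetical helpers:

#include <stdio.h>

/* Hypothetical model of the talitos lengths above. Here cryptlen is
 * assumed to exclude the ICV even on decrypt (aead_decrypt subtracted
 * authsize first), so the extra bytes go to the side carrying the ICV:
 * src on decrypt, dst on encrypt. */
static int talitos_src_len(int cryptlen, int authsize, int encrypt)
{
	return cryptlen + (encrypt ? 0 : authsize); /* ICV arrives in src on decrypt */
}

static int talitos_dst_len(int cryptlen, int authsize, int encrypt)
{
	return cryptlen + (encrypt ? authsize : 0); /* ICV written to dst on encrypt */
}

int main(void)
{
	printf("encrypt: src=%d dst=%d\n",
	       talitos_src_len(64, 16, 1), talitos_dst_len(64, 16, 1));
	printf("decrypt: src=%d dst=%d\n",
	       talitos_src_len(64, 16, 0), talitos_dst_len(64, 16, 0));
	return 0;
}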
@@ -1173,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ if (assoc_chained)
+ talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ else if (assoclen)
+ dma_unmap_sg(dev, assoc,
+ assoc_nents ? assoc_nents - 1 : 1,
+ DMA_TO_DEVICE);
+
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
@@ -1197,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
- int icv_stashing)
+ int icv_stashing, bool encrypt)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1206,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
ctx->authsize, ivsize, icv_stashing,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
@@ -1216,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 0);
+ edesc = aead_edesc_alloc(req, req->iv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1239,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, req->iv, 1);
+ edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1285,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, req->giv, 0);
+ edesc = aead_edesc_alloc(areq, req->giv, 0, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1441,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
- areq)
+ areq, bool encrypt)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1449,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
- areq->base.flags);
+ areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1459,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1476,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(areq);
+ edesc = ablkcipher_edesc_alloc(areq, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1628,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
- nbytes, 0, 0, 0, areq->base.flags);
+ nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)