Diffstat (limited to 'drivers/crypto/caam/caamhash.c')
 drivers/crypto/caam/caamhash.c | 303 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 299 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 9eac4dcb9971..c3987747138d 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -62,6 +62,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@@ -115,6 +116,7 @@ struct caam_hash_ctx {
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
+ unsigned int key_len;
unsigned int split_key_len;
unsigned int split_key_pad_len;
};
@@ -232,6 +234,13 @@ static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
+static inline void append_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ append_key_as_imm(desc, ctx->key, ctx->key_len,
+ ctx->key_len, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+}
+
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
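A note on the contrast between the two key helpers above: append_key_ahash() loads an MDHA split key in encrypted form (KEY_DEST_MDHA_SPLIT | KEY_ENC), while the new append_key_axcbc() loads the raw XCBC key as an immediate into the class 1 key register (CLASS_1 | KEY_DEST_CLASS_REG). AES-XCBC runs on the class 1 AES accelerator rather than the class 2 MDHA, so no split-key preprocessing applies.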
@@ -253,6 +262,25 @@ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
+static inline void init_sh_desc_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ if (ctx->key_len) {
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_axcbc(desc, ctx);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+ }
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
/*
* For ahash read data from seqin following state->caam_ctx,
* and write resulting class2 context to seqout, which may be state->caam_ctx
@@ -272,6 +300,20 @@ static inline void ahash_append_load_str(u32 *desc, int digestsize)
LDST_SRCDST_BYTE_CONTEXT);
}
+static inline void axcbc_append_load_str(u32 *desc, int digestsize)
+{
+ /* Calculate remaining bytes to read */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+
+ /* Store class1 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+
/*
* For ahash update, final and finup, import context, read and write to seqout
*/
@@ -294,6 +336,27 @@ static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
ahash_append_load_str(desc, digestsize);
}
+/*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+static inline void axcbc_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize,
+ struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_axcbc(desc, ctx);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ctx->ctx_len);
+
+ /* Class 1 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ axcbc_append_load_str(desc, digestsize);
+}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
int digestsize, struct caam_hash_ctx *ctx)
@@ -309,6 +372,21 @@ static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
ahash_append_load_str(desc, digestsize);
}
+/* For ahash firsts and digest, read and write to seqout */
+static inline void axcbc_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_axcbc(desc, ctx);
+
+ /* Class 1 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ axcbc_append_load_str(desc, digestsize);
+}
+
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -426,6 +504,124 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 have_key = 0;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ctx->ctx_len);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+ OP_ALG_ENCRYPT);
+
+ /* Load data and write to result or context */
+ axcbc_append_load_str(desc, ctx->ctx_len);
+
+ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+
+ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+ ctx->ctx_len, ctx);
+
+ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update first shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+
+ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+
+ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+
+ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+ digestsize, ctx);
+
+ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash digest shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
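Every shared descriptor built in axcbc_set_sh_desc() above follows the same lifecycle: construct into the ctx buffer, dma_map_single() it toward the job ring device, bail out on a mapping error, and sync it to the device before first use. The five descriptors differ only in the OP_ALG_AS_* state they are built with: UPDATE imports and re-exports the running context, INIT starts without importing one, FINALIZE imports the context and writes the digest, and INITFINAL performs a one-shot digest. A minimal sketch of the shared mapping pattern — map_shared_desc() is a name of this note's choosing, not a helper in the driver:

static int map_shared_desc(struct device *jrdev, u32 *desc,
			   dma_addr_t *dma)
{
	/* Hand the constructed descriptor to the job ring's device */
	*dma = dma_map_single(jrdev, desc, desc_bytes(desc),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, *dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* Make the CPU-side descriptor writes visible to the CAAM */
	dma_sync_single_for_device(jrdev, *dma, desc_bytes(desc),
				   DMA_TO_DEVICE);
	return 0;
}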
@@ -582,6 +778,25 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return -EINVAL;
}
+static int axcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ret = 0;
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key, key, keylen);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->key_len, 1);
+#endif
+
+ ret = axcbc_set_sh_desc(ahash);
+
+ return ret;
+}
/*
* ahash_edesc - s/w-extended ahash descriptor
* @dst_dma: physical mapped address of req->result
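One caveat in axcbc_setkey() above: the key is copied into the fixed-size ctx->key buffer (CAAM_MAX_HASH_KEY_SIZE bytes) without any length validation. A hardened variant would bound keylen first; the sketch below is illustrative rather than part of the patch, and assumes AES_KEYSIZE_128 from <crypto/aes.h> on the basis that AES-XCBC-MAC (RFC 3566) is defined over a 128-bit key:

static int axcbc_setkey_checked(struct crypto_ahash *ahash,
				const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	/* Reject lengths that would overflow ctx->key or that
	 * AES-XCBC-MAC is not defined for (hypothetical check,
	 * not present in the patch).
	 */
	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	ctx->key_len = keylen;
	memcpy(ctx->key, key, keylen);

	return axcbc_set_sh_desc(ahash);
}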
@@ -604,11 +819,20 @@ static inline void ahash_unmap(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int len;
+
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+ len = state->current_buf ? state->buflen_1 : state->buflen_0;
+ if (state->buf_dma && len) {
+ dma_unmap_single(dev, state->buf_dma, len, DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1829,6 +2053,29 @@ static struct caam_hash_template driver_hash[] = {
.alg_type = OP_ALG_ALGSEL_MD5,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
+ {
+ .name = "xcbc(aes)",
+ .driver_name = "xcbc-aes-caam",
+ .hmac_name = "xcbc(aes)",
+ .hmac_driver_name = "xcbc-aes-caam",
+ .blocksize = XCBC_MAC_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = axcbc_setkey,
+ .halg = {
+ .digestsize = XCBC_MAC_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+ .alg_op = OP_ALG_ALGSEL_AES,
+ },
};
struct caam_hash_alg {
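Once the "xcbc(aes)" template above is registered, in-kernel users reach the accelerated MAC through the generic ahash API by algorithm name. A self-contained usage sketch — xcbc_demo() is a hypothetical consumer, not part of this patch, and it treats completion as synchronous, whereas a real caller must also handle -EINPROGRESS/-EBUSY from the async path:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/hash.h>

static int xcbc_demo(const u8 key[AES_KEYSIZE_128],
		     const u8 *msg, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	/* The crypto core selects xcbc-aes-caam when its priority wins */
	tfm = crypto_alloc_ahash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, msg, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}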
@@ -1878,6 +2125,41 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return ahash_set_sh_desc(ahash);
}
+static int caam_axcbc_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret = 0;
+
+ /*
+ * Get a Job ring from Job Ring driver to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS1_ALG | caam_hash->alg_op;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ ret = axcbc_set_sh_desc(ahash);
+
+ return ret;
+}
+
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1954,7 +2236,11 @@ caam_hash_alloc(struct caam_hash_template *template,
t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_hash_cra_init;
+
+	if (strstr(alg->cra_name, "xcbc"))
+ alg->cra_init = caam_axcbc_cra_init;
+ else
+ alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
alg->cra_priority = CAAM_CRA_PRIORITY;
@@ -2007,8 +2293,14 @@ static int __init caam_algapi_hash_init(void)
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ if (priv->has_seco) {
+ i = priv->first_jr_index;
+ cha_vid = rd_reg32(&priv->jr[i]->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->jr[i]->perfmon.cha_num_ls);
+ } else {
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ }
/*
* Skip registration of any hashing algorithms if MD block
@@ -2049,6 +2341,9 @@ static int __init caam_algapi_hash_init(void)
} else
list_add_tail(&t_alg->entry, &hash_list);
+ if (alg->alg_op == OP_ALG_ALGSEL_AES)
+ continue;
+
/* register unkeyed version */
t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
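The OP_ALG_ALGSEL_AES check above is what keeps the loop from also registering an unkeyed variant of the XCBC template: caam_hash_alloc(alg, false) strips .setkey, and unlike the plain digests, XCBC-MAC has no meaningful unkeyed form.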