Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig             |   11
-rw-r--r--  drivers/crypto/caam/Kconfig        |   82
-rw-r--r--  drivers/crypto/caam/Makefile       |    8
-rw-r--r--  drivers/crypto/caam/caamalg.c      | 2239
-rw-r--r--  drivers/crypto/caam/caamhash.c     | 1969
-rw-r--r--  drivers/crypto/caam/caamrng.c      |  381
-rw-r--r--  drivers/crypto/caam/compat.h       |   18
-rw-r--r--  drivers/crypto/caam/ctrl.c         |  430
-rw-r--r--  drivers/crypto/caam/desc.h         | 2310
-rw-r--r--  drivers/crypto/caam/desc_constr.h  |  117
-rw-r--r--  drivers/crypto/caam/error.c        |    5
-rw-r--r--  drivers/crypto/caam/error.h        |    2
-rw-r--r--  drivers/crypto/caam/intern.h       |   80
-rw-r--r--  drivers/crypto/caam/jr.c           |   89
-rw-r--r--  drivers/crypto/caam/key_gen.c      |  124
-rw-r--r--  drivers/crypto/caam/key_gen.h      |   17
-rw-r--r--  drivers/crypto/caam/regs.h         |  282
-rw-r--r--  drivers/crypto/caam/secvio.c       |  310
-rw-r--r--  drivers/crypto/caam/secvio.h       |   64
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h   |  165
-rw-r--r--  drivers/crypto/caam/sm.h           |   86
-rw-r--r--  drivers/crypto/caam/sm_store.c     |  890
-rw-r--r--  drivers/crypto/caam/sm_test.c      |  796
-rw-r--r--  drivers/crypto/caam/snvsregs.h     |  237
24 files changed, 8920 insertions, 1792 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 13fdb2c3c465..ed37f0a758d8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -173,6 +173,7 @@ config CRYPTO_DEV_MV_CESA
select CRYPTO_ALGAPI
select CRYPTO_AES
select CRYPTO_BLKCIPHER2
+ select CRYPTO_HASH
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on the Marvell Orion
@@ -293,13 +294,13 @@ config CRYPTO_DEV_S5P
algorithms execution.
config CRYPTO_DEV_DCP
tristate "Support for the DCP engine"
- depends on SOC_IMX28 || SOC_IMX23 || SOC_IMX50
+ depends on SOC_IMX28 || SOC_IMX23 || SOC_IMX50 || SOC_IMX6SL
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
- Say 'Y' here to use the DCP AES and SHA
- engine for the CryptoAPI algorithms.
- To compile this driver as a module, choose M here: the module
- will be called geode-aes.
+ Say 'Y' here to use the DCP AES and SHA
+ engine for the CryptoAPI algorithms.
+ To compile this driver as a module, choose M here: the module
+ will be called geode-aes.
endif # CRYPTO_HW
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 2d876bb98ff4..bc7fbd450e9e 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_MXC
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -58,7 +58,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
+# Forced to non-module for current ARM branch, until CONFIG_OF possible
+# tristate "Register algorithm implementations with the Crypto API"
+ boolean "Register algorithm implementations with the Crypto API"
depends on CRYPTO_DEV_FSL_CAAM
default y
select CRYPTO_ALGAPI
@@ -70,3 +72,79 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
To compile this as a module, choose M here: the module
will be called caamalg.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+# Forced to non-module for current ARM branch, until CONFIG_OF possible
+# tristate "Register hash algorithm implementations with Crypto API"
+ boolean "Register hash algorithm implementations with Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_AHASH
+ help
+ Selecting this will offload ahash for users of the
+ scatterlist crypto API to the SEC4 via job ring.
+
+ To compile this as a module, choose M here: the module
+ will be called caamhash.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+# Forced to non-module for current ARM branch, until CONFIG_OF possible
+# tristate "Register caam device for hwrng API"
+ boolean "Register caam device for hwrng API"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_RNG
+ help
+ Selecting this will register the SEC4 hardware rng to
+ the hw_random API for supplying the kernel entropy pool.
+
+ To compile this as a module, choose M here: the module
+ will be called caamrng.
+
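Once caamrng is registered, the entropy is reachable through the standard hw_random interface; a minimal userspace sanity check, assuming only the stock /dev/hwrng node that the hw_random core exposes (nothing here is CAAM-specific):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		ssize_t n;
		int fd = open("/dev/hwrng", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/hwrng");
			return 1;
		}
		n = read(fd, buf, sizeof(buf));	/* blocks until the RNG delivers */
		close(fd);
		if (n != (ssize_t)sizeof(buf)) {
			perror("read");
			return 1;
		}
		for (n = 0; n < (ssize_t)sizeof(buf); n++)
			printf("%02x", buf[n]);
		printf("\n");
		return 0;
	}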
+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ boolean "Test caam rng"
+ depends on CRYPTO_DEV_FSL_CAAM_RNG_API
+ default n
+ help
+ Selecting this will enable self-test for caam rng.
+
+config CRYPTO_DEV_FSL_CAAM_SM
+ boolean "CAAM Secure Memory / Keystore API (EXPERIMENTAL)"
+ default n
+ help
+ Enables use of a prototype kernel-level Keystore API with CAAM
+ Secure Memory for insertion/extraction of bus-protected secrets.
+
+config CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE
+ int "Size of each keystore slot in Secure Memory"
+ depends on CRYPTO_DEV_FSL_CAAM_SM
+ range 5 9
+ default 7
+ help
+ Select size of allocation units to divide Secure Memory pages into
+ (the size of a "slot" as referenced inside the API code).
+ Established as powers of two.
+ Examples:
+ 5 => 32 bytes
+ 6 => 64 bytes
+ 7 => 128 bytes
+ 8 => 256 bytes
+ 9 => 512 bytes
+
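The slot size option is an exponent, so the keystore presumably converts it with a shift; a sketch of the arithmetic implied by the help text above (macro names hypothetical, and the 4096-byte Secure Memory page size is an assumption, not stated here):

	/* Kconfig value N gives 2^N-byte slots (see the table above). */
	#define SM_SLOT_SHIFT	7	/* CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE=7 */
	#define SM_SLOT_SIZE	(1 << SM_SLOT_SHIFT)		/* 128 bytes */

	/* Assuming 4096-byte Secure Memory pages: 4096 >> 7 = 32 slots/page */
	#define SM_SLOTS_PER_PAGE(pg)	((pg) >> SM_SLOT_SHIFT)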
+config CRYPTO_DEV_FSL_CAAM_SM_TEST
+ boolean "CAAM Secure Memory - Keystore Test/Example (EXPERIMENTAL)"
+ depends on CRYPTO_DEV_FSL_CAAM_SM
+ default n
+ help
+ Example thread to exercise the Keystore API and to verify that
+ stored and recovered secrets can be used for general purpose
+ encryption/decryption.
+
+config CRYPTO_DEV_FSL_CAAM_SECVIO
+ boolean "CAAM/SNVS Security Violation Handler (EXPERIMENTAL)"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default n
+ help
+ Enables installation of an interrupt handler with registrable
+ handler functions which can be specified to act on the consequences
+ of a security violation.
\ No newline at end of file
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index ef39011b4505..36bc651a368d 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -4,5 +4,11 @@
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM) += sm_store.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST) += sm_test.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO) += secvio.o
+
-caam-objs := ctrl.o jr.o error.o
+caam-objs := ctrl.o jr.o error.o key_gen.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 676d957c22b0..0f0a9731c941 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for crypto API
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*
* Based on talitos crypto API driver.
*
@@ -37,9 +37,10 @@
* | ShareDesc Pointer |
* | SEQ_OUT_PTR |
* | (output buffer) |
+ * | (output length) |
* | SEQ_IN_PTR |
* | (input buffer) |
- * | LOAD (to DECO) |
+ * | (input length) |
* ---------------------
*/
@@ -50,7 +51,10 @@
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+/*#define DEBUG*/
/*
* crypto alg
*/
@@ -62,10 +66,22 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN 4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
-#define DESC_AEAD_DECRYPT_TEXT_LEN 24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
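As a worked example of these budgets (assuming CAAM_CMD_SZ = 4, i.e. one 32-bit word per command, and CAAM_PTR_SZ = 4 for a 32-bit bus — both assumptions about desc.h, which this hunk does not show):

	/*
	 * DESC_JOB_IO_LEN   = 5*4 + 3*4  =  32 bytes  (8 words)
	 * DESC_AEAD_ENC_LEN = (4+16) * 4 =  80 bytes  (20 words)
	 * 64-word h/w buffer             = 256 bytes
	 *
	 * So the "keys fit inline" test used below,
	 *
	 *   DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	 *   split_key_pad_len + enckeylen <= CAAM_DESC_BYTES_MAX
	 *
	 * leaves 256 - 80 - 32 = 144 bytes of key material that can be
	 * embedded directly in the encrypt shared descriptor.
	 */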
#ifdef DEBUG
/* for print_hex_dumps with line references */
@@ -76,217 +92,385 @@
#define debug(format, arg...)
#endif
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
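At run time the commands emitted by append_dec_op1() amount to a two-way branch on the shared-descriptor condition; roughly (pseudo-C for the descriptor's control flow, not driver API):

	/*
	 * if (SHRD)                        JUMP taken: descriptor was shared,
	 *         OPERATION(DECRYPT | DK); so select the Decrypt-Key AAI
	 * else
	 *         OPERATION(DECRYPT);      plain decrypt, then jump past DK
	 */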
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ivsize);
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
/*
* per-session context
*/
struct caam_ctx {
struct device *jrdev;
- u32 *sh_desc;
- dma_addr_t shared_desc_phys;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
- u8 *key;
- dma_addr_t key_phys;
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
unsigned int enckeylen;
unsigned int split_key_len;
unsigned int split_key_pad_len;
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
{
- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
-
- ctx->authsize = authsize;
-
- return 0;
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
}
-struct split_key_result {
- struct completion completion;
- int err;
-};
-
-static void split_key_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
{
- struct split_key_result *res = context;
+ u32 *key_jump_cmd;
-#ifdef DEBUG
- dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
- dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- res->err = err;
+ append_key_aead(desc, ctx, keys_fit_inline);
- complete(&res->completion);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
-/*
-get a split ipad/opad key
-
-Split key generation-----------------------------------------------
-
-[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
-[01] 0x04000014 key: class2->keyreg len=20
- @0xffe01000
-[03] 0x84410014 operation: cls2-op sha1 hmac init dec
-[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
-[05] 0xa4000001 jump: class2 local all ->1 [06]
-[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
- @0xffe04000
-*/
-static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
+static int aead_set_sh_desc(struct crypto_aead *aead)
{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 geniv, moveiv;
u32 *desc;
- struct split_key_result result;
- dma_addr_t dma_addr_in, dma_addr_out;
- int ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
- init_job_desc(desc, 0);
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
- dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_in)) {
- dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
- }
- append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
- KEY_DEST_CLASS_REG);
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
- /* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
- OP_ALG_AS_INIT);
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
- /*
- * do a FIFO_LOAD of zero, this will trigger the internal key expansion
- into both pads inside MDHA
- */
- append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /*
- * FIFO_STORE with the explicit split-key content store
- * (0x26 output type)
- */
- dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- kfree(desc);
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
- append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
- LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
#endif
- result.err = 0;
- init_completion(&result.completion);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
- ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
- /* in progress */
- wait_for_completion_interruptible(&result.completion);
- ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
-#endif
- }
+ desc = ctx->sh_desc_dec;
- dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
- DMA_FROM_DEVICE);
- dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
- kfree(desc);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- return ret;
-}
+ append_key_aead(desc, ctx, keys_fit_inline);
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
- struct device *jrdev = ctx->jrdev;
- u32 *sh_desc;
- u32 *jump_cmd;
- bool keys_fit_inline = 0;
+ /* Only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
/*
- * largest Job Descriptor and its Shared Descriptor
- * must both fit into the 64-word Descriptor h/w Buffer
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
*/
- if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
- DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
- ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
keys_fit_inline = 1;
- /* build shared descriptor for this session */
- sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
- (keys_fit_inline ?
- ctx->split_key_pad_len + ctx->enckeylen :
- CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
- if (!sh_desc) {
- dev_err(jrdev, "could not allocate shared descriptor\n");
- return -ENOMEM;
- }
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
- init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
- jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
- /*
- * process keys, starting with class 2/authentication.
- */
- if (keys_fit_inline) {
- append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len,
- CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ /* Copy IV from class 1 context to output fifo */
+ append_move(desc, MOVE_SRC_CLASS1CTX |
+ MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
- append_key_as_imm(sh_desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
+ /* Return to encryption */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from class 1 ctx to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
- /* update jump cmd now that we are at the jump target */
- set_jump_tgt_here(sh_desc, jump_cmd);
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
- ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
- desc_bytes(sh_desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
- kfree(sh_desc);
return -ENOMEM;
}
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+}
- ctx->sh_desc = sh_desc;
+static int aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
return 0;
}
-static int aead_authenc_setkey(struct crypto_aead *aead,
+static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+ u32 authkeylen)
+{
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, authkeylen,
+ ctx->alg_op);
+}
+
+static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
@@ -326,27 +510,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
- GFP_KERNEL | GFP_DMA);
- if (!ctx->key) {
- dev_err(jrdev, "could not allocate key output memory\n");
- return -ENOMEM;
- }
- ret = gen_split_key(ctx, key, authkeylen);
+ ret = gen_split_aead_key(ctx, key, authkeylen);
if (ret) {
- kfree(ctx->key);
goto badkey;
}
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
- ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_phys)) {
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
- kfree(ctx->key);
return -ENOMEM;
}
#ifdef DEBUG
@@ -354,14 +530,16 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len + enckeylen, 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->split_key_pad_len + enckeylen,
+ DMA_TO_DEVICE);
ctx->enckeylen = enckeylen;
- ret = build_sh_desc_ipsec(ctx);
+ ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- kfree(ctx->key);
}
return ret;
@@ -370,73 +548,244 @@ badkey:
return -EINVAL;
}
-struct link_tbl_entry {
- u64 ptr;
- u32 len;
- u8 reserved;
- u8 buf_pool_id;
- u16 offset;
-};
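Its replacement, struct sec4_sg_entry, is defined in the new sg_sw_sec4.h (see the diffstat); judging by the removed layout above and by the code below, which sizes tables with sizeof(struct sec4_sg_entry) and marks the last entry via sg_to_sec4_sg_last(), it presumably keeps the same hardware format:

	/* Assumed SEC4 h/w scatter/gather entry; the authoritative
	 * definition lives in sg_sw_sec4.h, which this diff omits. */
	struct sec4_sg_entry {
		u64 ptr;	/* segment bus address */
		u32 len;	/* length; bit 30 flags the final entry */
		u8 reserved;
		u8 buf_pool_id;
		u16 offset;
	};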
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 *desc;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+ /* Load iv */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* For aead, only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* load IV */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ /* Wait for key to load before allowing propagating error */
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return ret;
+}
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if source is chained
* @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
* @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @link_tbl_bytes: length of dma mapped link_tbl space
- * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
-struct ipsec_esp_edesc {
+struct aead_edesc {
int assoc_nents;
+ bool assoc_chained;
int src_nents;
+ bool src_chained;
int dst_nents;
- int link_tbl_bytes;
- dma_addr_t link_tbl_dma;
- struct link_tbl_entry *link_tbl;
+ bool dst_chained;
+ dma_addr_t iv_dma;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
u32 hw_desc[0];
};
-static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
- struct aead_request *areq)
-{
- dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
+ * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ bool src_chained;
+ int dst_nents;
+ bool dst_chained;
+ dma_addr_t iv_dma;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
- if (unlikely(areq->dst != areq->src)) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
- DMA_FROM_DEVICE);
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ bool src_chained, int dst_nents, bool dst_chained,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ int sec4_sg_bytes)
+{
+ if (dst != src) {
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+ src_chained);
+ dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+ dst_chained);
} else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
}
- if (edesc->link_tbl_bytes)
- dma_unmap_single(dev, edesc->link_tbl_dma,
- edesc->link_tbl_bytes,
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ if (sec4_sg_bytes)
+ dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
}
-/*
- * ipsec_esp descriptor callbacks
- */
-static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+ DMA_TO_DEVICE, edesc->assoc_chained);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- int ivsize = crypto_aead_ivsize(aead);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -444,39 +793,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen +
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1);
#endif
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
}
-static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen, 1);
+#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -484,7 +844,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
/*
* verify hw auth check passed else return -EBADMSG
@@ -495,400 +855,823 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + areq->assoclen +
- ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+ ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1);
- if (!err && edesc->link_tbl_bytes) {
- struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+ if (!err && edesc->sec4_sg_bytes) {
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1);
}
#endif
+
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
}
-/*
- * convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
- */
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- while (sg_count) {
- link_tbl_ptr->ptr = sg_dma_address(sg);
- link_tbl_ptr->len = sg_dma_len(sg);
- link_tbl_ptr->reserved = 0;
- link_tbl_ptr->buf_pool_id = 0;
- link_tbl_ptr->offset = offset;
- link_tbl_ptr++;
- sg = sg_next(sg);
- sg_count--;
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- /* set Final bit (marks end of link table) */
- link_tbl_ptr--;
- link_tbl_ptr->len |= 0x40000000;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
}
/*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
- u32 encrypt,
- void (*callback) (struct device *dev, u32 *desc,
- u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 *desc = edesc->hw_desc, options;
- int ret, sg_count, assoc_sg_count;
int ivsize = crypto_aead_ivsize(aead);
int authsize = ctx->authsize;
- dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
- u32 *sh_desc = ctx->sh_desc;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
- areq->assoclen, areq->cryptlen, authsize);
+ req->assoclen, req->cryptlen, authsize);
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
- assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
- DMA_TO_DEVICE);
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- /* start auth operation */
- append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
- (encrypt ? : OP_ALG_ICV_ON));
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* Load FIFO with data for Class 2 CHA */
- options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
- if (!edesc->assoc_nents) {
- ptr = sg_dma_address(areq->assoc);
+ if (all_contig) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
- edesc->link_tbl, 0);
- ptr = edesc->link_tbl_dma;
- options |= LDST_SGF;
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
}
- append_fifo_load(desc, ptr, areq->assoclen, options);
-
- /* copy iv from cipher/class1 input context to class2 infifo */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
- if (!encrypt) {
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* JUMP if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-
- /* start class 1 (cipher) operation, non-shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL);
-
- uncond_jump_cmd = append_jump(desc, 0);
-
- set_jump_tgt_here(desc, jump_cmd);
-
- /* start class 1 (cipher) operation, shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
- } else
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | encrypt);
+ if (encrypt)
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+ else
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen, in_options);
- /* load payload & instruct to class2 to snoop class 1 if encrypting */
- options = 0;
- if (!edesc->src_nents) {
- src_dma = sg_dma_address(areq->src);
- } else {
- sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
- edesc->assoc_nents, 0);
- src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
- sizeof(struct link_tbl_entry);
- options |= LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
- FIFOLD_TYPE_LASTBOTH |
- (encrypt ? FIFOLD_TYPE_MSG1OUT2
- : FIFOLD_TYPE_MSG));
-
- /* specify destination */
- if (areq->src == areq->dst) {
- dst_dma = src_dma;
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
} else {
- sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(areq->dst);
- options = 0;
+ dst_dma = sg_dma_address(req->dst);
} else {
- sg_to_link_tbl(areq->dst, edesc->dst_nents,
- edesc->link_tbl + edesc->assoc_nents +
- edesc->src_nents, 0);
- dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
- edesc->src_nents) *
- sizeof(struct link_tbl_entry);
- options = LDST_SGF;
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
-
- /* ICV */
if (encrypt)
- append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
else
- append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+}
+
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ int contig)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
- debug("job_desc_len %d\n", desc_len(desc));
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
- print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
- edesc->link_tbl_bytes, 1);
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, callback, areq);
- if (!ret)
- ret = -EINPROGRESS;
- else {
- ipsec_esp_unmap(jrdev, edesc, areq);
- kfree(edesc);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (contig & GIV_SRC_CONTIG) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
}
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
- return ret;
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
+ } else {
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+ edesc->assoc_nents;
+ out_options = LDST_SGF;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}
/*
- * derive number of elements in scatterlist
+ * Fill in ablkcipher job descriptor
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
{
- struct scatterlist *sg = sg_list;
- int sg_nents = 0;
-
- *chained = 0;
- while (nbytes > 0) {
- sg_nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = 1;
- sg = scatterwalk_sg_next(sg);
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (iv_contig) {
+ src_dma = edesc->iv_dma;
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ in_options = LDST_SGF;
}
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
- return sg_nents;
+ if (likely(req->src == req->dst)) {
+ if (!edesc->src_nents && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the aead extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int desc_bytes)
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
- struct ipsec_esp_edesc *edesc;
-
- assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
- BUG_ON(chained);
- if (likely(assoc_nents == 1))
- assoc_nents = 0;
-
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (src_nents == 1)
- src_nents = 0;
-
- if (unlikely(areq->dst != areq->src)) {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (dst_nents == 1)
- dst_nents = 0;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ bool all_contig = true;
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int ivsize = crypto_aead_ivsize(aead);
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
}
- link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
- sizeof(struct link_tbl_entry);
- debug("link_tbl_bytes %d\n", link_tbl_bytes);
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize !=
+ sg_dma_address(req->src)) {
+ all_contig = false;
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ sec4_sg_len = assoc_nents + 1 + src_nents;
+ }
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
- link_tbl_bytes, GFP_DMA | flags);
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
- edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
- desc_bytes;
- edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
- link_tbl_bytes, DMA_TO_DEVICE);
- edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->dst_chained = dst_chained;
+ edesc->iv_dma = iv_dma;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ *all_contig_ptr = all_contig;
+
+ sec4_sg_index = 0;
+ if (!all_contig) {
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src,
+ (src_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents ? : 1;
+ }
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
return edesc;
}
-static int aead_authenc_encrypt(struct aead_request *areq)
+static int aead_encrypt(struct aead_request *req)
{
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
+ int ret = 0;
+
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- desc = edesc->hw_desc;
-
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
-
- iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
+ struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct ipsec_esp_edesc *edesc;
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
-
- req->cryptlen -= ctx->authsize;
+ int ret = 0;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+ *greq, int desc_bytes,
+ u32 *contig_ptr)
+{
+ struct aead_request *req = &greq->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+ int ivsize = crypto_aead_ivsize(aead);
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_len += 1;
+ }
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ sec4_sg_len += assoc_nents + 1 + src_nents;
+ if (likely(req->src == req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ }
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->iv_dma = iv_dma;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ *contig_ptr = contig;
+
+ sec4_sg_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
+ }
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ return edesc;
}
-static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
- struct aead_request *areq = &req->areq;
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_request *req = &areq->areq;
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
- dma_addr_t iv_dma;
+ u32 contig;
u32 *desc;
+ int ret = 0;
- iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
-
- debug("%s: giv %p\n", __func__, req->giv);
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- /*
- * LOAD IMM Info FIFO
- * to DECO, Last, Padding, Random, Message, 16 bytes
- */
- append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
- NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
- NFIFOENTRY_PTYPE_RND | ivsize,
- LDST_SRCDST_WORD_INFO_FIFO);
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
+
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+ if (req->dst != req->src)
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+ /* FIXME: no test for sgc values returned above... */
/*
- * disable info fifo entries since the above serves as the entry
- * this way, the MOVE command won't generate an entry.
- * Note that this isn't required in more recent versions of
- * SEC as a MOVE that doesn't do info FIFO entries is available.
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create scatterlist.
*/
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+ src_nents = src_nents ? : 1;
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* MOVE DECO Alignment -> C1 Context 16 bytes */
- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ sec4_sg_index = 0;
+ if (!iv_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + 1, 0);
+ sec4_sg_index += 1 + src_nents;
+ }
- /* re-enable info fifo entries */
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
- /* MOVE C1 Context -> OFIFO 16 bytes */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->iv_dma = iv_dma;
- append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
}
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
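+/*
+ * Hedged usage sketch, not part of this driver: a minimal way a kernel
+ * consumer of this era could drive the cbc-aes-caam ablkcipher entry points
+ * above through the crypto API. The example_* names are hypothetical, and
+ * error results delivered through the async callback are ignored for
+ * brevity.
+ */
+static void example_crypt_done(struct crypto_async_request *areq, int err)
+{
+	if (err == -EINPROGRESS)
+		return;	/* backlog notification, job is still in flight */
+	complete(areq->data);
+}
+
+static int __maybe_unused example_cbc_aes_one_block(void)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct crypto_ablkcipher *tfm;
+	struct ablkcipher_request *req;
+	struct scatterlist sg;
+	/* Stack buffers keep the sketch short; real users need DMA-safe memory */
+	u8 key[16] = { 0 }, iv[16] = { 0 }, buf[16] = { 0 };
+	int ret;
+
+	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
+	if (ret)
+		goto out_tfm;
+
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto out_tfm;
+	}
+
+	sg_init_one(&sg, buf, sizeof(buf));
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					example_crypt_done, &done);
+	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
+
+	/* ablkcipher_encrypt() above returns -EINPROGRESS once enqueued */
+	ret = crypto_ablkcipher_encrypt(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		wait_for_completion(&done);
+		ret = 0;
+	}
+
+	ablkcipher_request_free(req);
+out_tfm:
+	crypto_free_ablkcipher(tfm);
+	return ret;
+}
+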
+#define template_aead template_u.aead
+#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- struct aead_alg aead;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
@@ -897,15 +1680,35 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
+ .name = "authenc(hmac(md5),cbc(aes))",
+ .driver_name = "authenc-hmac-md5-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -915,15 +1718,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha224),cbc(aes))",
+ .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -934,15 +1758,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha384),cbc(aes))",
+ .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha512),cbc(aes))",
.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -953,15 +1798,35 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -971,15 +1836,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -990,15 +1876,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha512),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1009,15 +1916,35 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des))",
+ .driver_name = "authenc-hmac-md5-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -1027,15 +1954,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha224),cbc(des))",
+ .driver_name = "authenc-hmac-sha224-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha256),cbc(des))",
.driver_name = "authenc-hmac-sha256-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -1046,15 +1994,36 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(sha384),cbc(des))",
+ .driver_name = "authenc-hmac-sha384-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha512),cbc(des))",
.driver_name = "authenc-hmac-sha512-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1064,6 +2033,55 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ }
};
struct caam_crypto_alg {
@@ -1102,38 +2120,29 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
{
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
- dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- kfree(ctx->sh_desc);
-
- if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
- dma_unmap_single(ctx->jrdev, ctx->key_phys,
- ctx->split_key_pad_len + ctx->enckeylen,
+ if (ctx->sh_desc_enc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+ if (ctx->sh_desc_dec_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+ if (ctx->sh_desc_givenc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
- kfree(ctx->key);
}
-static void __exit caam_algapi_exit(void)
+void caam_algapi_shutdown(struct platform_device *pdev)
{
-
- struct device_node *dev_node;
- struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
int i, err;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return;
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return;
-
ctrldev = &pdev->dev;
- of_node_put(dev_node);
priv = dev_get_drvdata(ctrldev);
if (!priv->alg_list.next)
@@ -1152,6 +2161,7 @@ static void __exit caam_algapi_exit(void)
}
kfree(priv->algapi_jr);
}
+EXPORT_SYMBOL_GPL(caam_algapi_shutdown);
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
struct caam_alg_template
@@ -1175,12 +2185,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
alg->cra_init = caam_cra_init;
alg->cra_exit = caam_cra_exit;
alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_u.aead = template->aead;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead = template->template_aead;
+ break;
+ }
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
@@ -1190,26 +2208,16 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
return t_alg;
}
-static int __init caam_algapi_init(void)
+int caam_algapi_startup(struct platform_device *pdev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
struct device *ctrldev, **jrdev;
struct caam_drv_private *priv;
- int i = 0, err = 0;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return -ENODEV;
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
- return -ENODEV;
+ int i = 0, err = 0, md_limit = 0;
+ int des_inst, aes_inst, md_inst;
+ u64 cha_inst;
ctrldev = &pdev->dev;
priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
INIT_LIST_HEAD(&priv->alg_list);
jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
@@ -1232,11 +2240,47 @@ static int __init caam_algapi_init(void)
priv->algapi_jr = jrdev;
atomic_set(&priv->tfm_count, -1);
- /* register crypto algorithms the device supports */
+ /*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence of DES, AES, and MD blocks. If an MD block
+	 * is present, determine the limit of supported digest size.
+ */
+ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
+ des_inst = (cha_inst & CHA_ID_DES_MASK) >> CHA_ID_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_AES_MASK) >> CHA_ID_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
+ if (md_inst) {
+ md_limit = SHA512_DIGEST_SIZE;
+ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
+ == CHA_ID_MD_LP256) /* LP256 limits digest size */
+ md_limit = SHA256_DIGEST_SIZE;
+ }
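+
+	/*
+	 * For example, a CAAM instance with a low-power MDHA (LP256), as
+	 * found on some i.MX parts, caps md_limit at SHA256_DIGEST_SIZE,
+	 * so the sha384/sha512 authenc templates are skipped by the loop
+	 * below.
+	 */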
+
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ /*
+		 * All registrable algs in this module require a blockcipher.
+		 * All aead algs additionally require message digests, so
+		 * check those for instantiation and digest size.
+ */
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD) {
+ /* If no MD instantiated, or MD too small, skip */
+ if ((!md_inst) ||
+ (driver_algs[i].template_aead.maxauthsize >
+ md_limit))
+ continue;
+ }
+ /* If DES alg, and CHA not instantiated, skip */
+ if ((driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_3DES) ||
+ (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_DES))
+ if (!des_inst)
+ continue;
+ /* If AES alg, and CHA not instantiated, skip */
+ if (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_AES)
+ if (!aes_inst)
+ continue;
+
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@@ -1259,6 +2303,48 @@ static int __init caam_algapi_init(void)
return err;
}
+EXPORT_SYMBOL_GPL(caam_algapi_startup);
+
+#ifdef CONFIG_OF
+static void __exit caam_algapi_exit(void)
+{
+
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node)
+ return;
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return;
+
+ caam_algapi_shutdown(pdev);
+
+ of_node_put(dev_node);
+}
+
+static int __init caam_algapi_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ int stat;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node)
+ return -ENODEV;
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ stat = caam_algapi_startup(pdev);
+
+ of_node_put(dev_node);
+
+ return stat;
+}
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);
@@ -1266,3 +2352,4 @@ module_exit(caam_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
+#endif
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 000000000000..93f42a220a32
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1969 @@
+/*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of digest job descriptor or first job descriptor after init to
+ * shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #1 |-------------------->| ShareDesc |
+ * | *(packet 1) | | (hashKey) |
+ * --------------- | (operation) |
+ * ---------------
+ *
+ * relationship of subsequent job descriptors to shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #2 |-------------------->| ShareDesc |
+ * | *(packet 2) | |------------->| (hashKey) |
+ * --------------- | |-------->| (operation) |
+ * . | | | (load ctx2) |
+ * . | | ---------------
+ * --------------- | |
+ * | JobDesc #3 |------| |
+ * | *(packet 3) | |
+ * --------------- |
+ * . |
+ * . |
+ * --------------- |
+ * | JobDesc #4 |------------
+ * | *(packet 4) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * | (output length) |
+ * | SEQ_IN_PTR |
+ * | (input buffer) |
+ * | (input length) |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+#define CAAM_CRA_PRIORITY 3000
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* length of descriptors text */
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
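+/*
+ * For example, assuming CAAM_CMD_SZ == 4 and a 32-bit dma_addr_t (so
+ * CAAM_PTR_SZ == 4), DESC_JOB_IO_LEN works out to 5 * 4 + 3 * 4 = 32 bytes:
+ * the header, the shared descriptor pointer, and the SEQ_OUT_PTR/SEQ_IN_PTR
+ * commands with their pointers and lengths.
+ */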
+
+#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
+ CAAM_MAX_HASH_KEY_SIZE)
+#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define xstr(s) str(s)
+#define str(s) #s
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
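+
+/*
+ * Illustrative sketch only, not used by the driver: building a job
+ * descriptor of the shape diagrammed in the header comment above, using
+ * the desc_constr.h helpers. All parameters here are hypothetical.
+ */
+static inline void example_build_ahash_jobdesc(u32 *desc,
+					       dma_addr_t sh_desc_dma,
+					       int sh_len, dma_addr_t src_dma,
+					       u32 src_len, dma_addr_t dst_dma,
+					       u32 dst_len)
+{
+	/* Header with a pointer to the (reverse-order) shared descriptor */
+	init_job_desc_shared(desc, sh_desc_dma, sh_len,
+			     HDR_SHARE_DEFER | HDR_REVERSE);
+	/* SEQ_OUT_PTR: output buffer and length */
+	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
+	/* SEQ_IN_PTR: input buffer and length */
+	append_seq_in_ptr(desc, src_dma, src_len, 0);
+}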
+
+/* ahash per-session context */
+struct caam_hash_ctx {
+ struct device *jrdev;
+ u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
+ dma_addr_t sh_desc_update_dma;
+ dma_addr_t sh_desc_update_first_dma;
+ dma_addr_t sh_desc_fin_dma;
+ dma_addr_t sh_desc_digest_dma;
+ dma_addr_t sh_desc_finup_dma;
+ u32 alg_type;
+ u32 alg_op;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
+ dma_addr_t key_dma;
+ int ctx_len;
+ unsigned int split_key_len;
+ unsigned int split_key_pad_len;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN];
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
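+
+/*
+ * buf_0/buf_1 above form a ping-pong pair selected by current_buf: one
+ * buffer holds the partial block carried into the current request, while
+ * the tail bytes left over for the next request accumulate in the other.
+ */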
+
+/* Common job descriptor seq in/out ptr routines */
+
+/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+ ctx_len, DMA_FROM_DEVICE);
+ append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+}
+
+/* Map req->result, and append seq_out_ptr command that points to it */
+static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+ u8 *result, int digestsize)
+{
+ dma_addr_t dst_dma;
+
+ dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+
+ return dst_dma;
+}
+
+/* Map current buffer in state and put it in link table */
+static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
+ struct sec4_sg_entry *sec4_sg,
+ u8 *buf, int buflen)
+{
+ dma_addr_t buf_dma;
+
+ buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, buf_dma, buflen, DMA_TO_DEVICE);
+ dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+
+ return buf_dma;
+}
+
+/* Map req->src and put it in link table */
+static inline void src_map_to_sec4_sg(struct device *jrdev,
+ struct scatterlist *src, int src_nents,
+ struct sec4_sg_entry *sec4_sg,
+ bool chained)
+{
+ dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+ sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
+}
+
+/*
+ * Unmap the previously used buffer, if any, and only put the current
+ * buffer in the link table if it contains data.
+ */
+static inline dma_addr_t
+try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
+ u8 *buf, dma_addr_t buf_dma, int buflen,
+ int last_buflen)
+{
+ if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
+ dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
+ if (buflen)
+ buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
+ else
+ buf_dma = 0;
+
+ return buf_dma;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len,
+ struct sec4_sg_entry *sec4_sg,
+ u32 flag)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if ((flag == DMA_TO_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_device(jrdev, state->ctx_dma, ctx_len,
+ flag);
+ dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+}
+
+/* Common shared descriptor commands */
+static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+}
+
+/* Append key if it has been set */
+static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ if (ctx->split_key_len) {
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_ahash(desc, ctx);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+ }
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For ahash, read data from seqin following state->caam_ctx,
+ * and write resulting class2 context to seqout, which may be state->caam_ctx
+ * or req->result
+ */
+static inline void ahash_append_load_str(u32 *desc, int digestsize)
+{
+ /* Calculate remaining bytes to read */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+
+/*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize,
+ struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
+
+/* For ahash update_first and digest, read and write to seqout */
+static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
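+
+/*
+ * Note: ahash_ctx_data_to_out() differs from ahash_data_to_out() only in
+ * importing the running context from state->caam_ctx first, which is why
+ * the final/finup descriptors use the former while update_first and digest
+ * use the latter (the update descriptor open-codes the same import in
+ * ahash_set_sh_desc() below).
+ */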
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 have_key = 0;
+ u32 *desc;
+
+ if (ctx->split_key_len)
+ have_key = OP_ALG_AAI_HMAC_PRECOMP;
+
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+ OP_ALG_ENCRYPT);
+
+ /* Load data and write to result or context */
+ ahash_append_load_str(desc, ctx->ctx_len);
+
+ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+ ctx->ctx_len, ctx);
+
+ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+ digestsize, ctx);
+
+ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 keylen)
+{
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, keylen,
+ ctx->alg_op);
+}
+
+/* Digest the key down to digestsize when it is longer than the block size */
+static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t src_dma, dst_dma;
+ int ret = 0;
+
+ /*
+ * Hashing descriptor is 6 commands (including header), 2 pointers,
+ * and 2 extended lengths
+ */
+	desc = kmalloc((CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2 +
+			CAAM_EXTLEN_SZ * 2),
+		       GFP_KERNEL | GFP_DMA);
+	if (!desc) {
+		dev_err(jrdev, "unable to allocate key hashing descriptor\n");
+		return -ENOMEM;
+	}
+
+	init_job_desc(desc, 0);
+
+ src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, src_dma)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ dma_sync_single_for_device(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+
+ dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ /* Job descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+#endif
+ }
+	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+	/* Only now report the digested length back to the caller */
+	*keylen = digestsize;
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ret = 0;
+ u8 *hashed_key = NULL;
+#ifdef DEBUG
+ printk(KERN_ERR "keylen %d\n", keylen);
+#endif
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
+ GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto badkey;
+ key = hashed_key;
+ }
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
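+	/*
+	 * Worked example: for hmac(sha256), mdpadlen[] gives 32, so
+	 * split_key_len = 64 and split_key_pad_len = ALIGN(64, 16) = 64.
+	 */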
+
+#ifdef DEBUG
+ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_hash_key(ctx, key, keylen);
+ if (ret)
+ goto badkey;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len, 1);
+#endif
+
+ ret = ahash_set_sh_desc(ahash);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ }
+
+ kfree(hashed_key);
+ return ret;
+badkey:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physical mapped address of req->result
+ * @sec4_sg_dma: physical mapped address of h/w link table
+ * @chained: if source is chained
+ * @src_nents: number of segments in input scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t sec4_sg_dma;
+ bool chained;
+ int src_nents;
+ int sec4_sg_bytes;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
+
+static inline void ahash_unmap(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ if (edesc->src_nents)
+ dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+ DMA_TO_DEVICE, edesc->chained);
+ if (edesc->dst_dma) {
+ dma_sync_single_for_cpu(dev, edesc->dst_dma, dst_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+ }
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len, u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ if (state->ctx_dma) {
+ if ((flag == DMA_FROM_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_cpu(dev, state->ctx_dma,
+ ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ }
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+/* submit update job descriptor */
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+	int *next_buflen = state->current_buf ? &state->buflen_0 :
+			   &state->buflen_1;
+	int last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ u32 *sh_desc = ctx->sh_desc_update, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_dma;
+ int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
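+	/*
+	 * Only whole blocks are hashed; residue below a blocksize is held
+	 * in one of the two ping-pong buffers and prepended to the data of
+	 * the next update call.
+	 */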
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+		/* point desc at the hw descriptor before handing it around */
+		desc = edesc->hw_desc;
+
+		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+				   edesc->sec4_sg, DMA_BIDIRECTIONAL);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+ edesc->sec4_sg + 1,
+ buf, state->buf_dma,
+ *buflen, last_buflen);
+
+ if (src_nents) {
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ chained);
+ if (*next_buflen) {
+ sg_copy_part(next_buf, req->src, to_hash -
+ *buflen, req->nbytes);
+ state->current_buf = !state->current_buf;
+ }
+ } else {
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+ SEC4_SG_LEN_FIN;
+ }
+
+		sh_len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ to_hash, LDST_SGF);
+
+ append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_BIDIRECTIONAL);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ sg_copy(buf + *buflen, req->src, req->nbytes);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ dma_addr_t ptr = ctx->sh_desc_fin_dma;
+ int sec4_sg_bytes;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
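+	/* link table holds the saved running context plus buffered residue */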
+ sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->src_nents = 0;
+
+ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+	/* index in entries, not bytes: flag the last link-table entry */
+	(edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) -
+	 1)->len |= SEC4_SG_LEN_FIN;
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+ LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_finup, *desc;
+ dma_addr_t ptr = ctx->sh_desc_finup_dma;
+ int sec4_sg_bytes, sec4_sg_src_index;
+ int src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
+ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
+ sec4_sg_src_index, chained);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ buflen + req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, sec4_sg_bytes;
+ dma_addr_t src_dma;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ u32 options;
+ int sh_len;
+
+ src_nents = sg_count(req->src, req->nbytes, &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
+ chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ DESC_JOB_IO_LEN, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash final if it is the first job descriptor */
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
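+	/* all remaining input sits in the staging buffer; map it directly */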
+ state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ edesc->src_nents = 0;
+
+ dma_sync_single_for_device(jrdev, state->buf_dma, buflen,
+ DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash update if it is the first job descriptor after update */
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ int *next_buflen = state->current_buf ? &state->buflen_0 :
+ &state->buflen_1;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int sec4_sg_bytes, src_nents;
+ struct ahash_edesc *edesc;
+ u32 *desc, *sh_desc = ctx->sh_desc_update_first;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ sec4_sg_bytes = (1 + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
+ buf, *buflen);
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + 1, chained);
+ if (*next_buflen) {
+ sg_copy_part(next_buf, req->src, to_hash - *buflen,
+ req->nbytes);
+ state->current_buf = !state->current_buf;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ sg_copy(buf + *buflen, req->src, req->nbytes);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+/* submit ahash finup if it is the first job descriptor after update */
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int sh_len;
+ int ret = 0;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 2;
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
+ state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+ chained);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
+ req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit first update job descriptor after init */
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
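+	/*
+	 * this indexing relies on buf_1/buflen_1 immediately following
+	 * buf_0/buflen_0 in struct caam_hash_state
+	 */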
+ u8 *next_buf = state->buf_0 + state->current_buf *
+ CAAM_MAX_HASH_BLOCK_SIZE;
+ int *next_buflen = &state->buflen_0 + state->current_buf;
+ int to_hash;
+ u32 *sh_desc = ctx->sh_desc_update_first, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ int sec4_sg_bytes, src_nents;
+ dma_addr_t src_dma;
+ u32 options;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg, 0);
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+
+ if (*next_buflen)
+ sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, src_dma, to_hash, options);
+
+ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
+ req);
+ if (!ret) {
+ ret = -EINPROGRESS;
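+			/*
+			 * first block is in flight; the running digest now
+			 * lives in the CAAM context, so switch to the
+			 * context-based variants
+			 */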
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ sg_copy(next_buf, req->src, req->nbytes);
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+	/*
+	 * ENGR00233800 (sha_speed crash in cryptodev): clear the whole
+	 * state, which also resets current_buf and both buffer lengths.
+	 */
+	memset(state, 0, sizeof(struct caam_hash_state));
+
+	state->update = ahash_update_first;
+	state->finup = ahash_finup_first;
+	state->final = ahash_final_no_ctx;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
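+/*
+ * export/import copy both the tfm context and the request state, so a
+ * partially hashed request can be resumed later from the same point
+ */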
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(struct caam_hash_ctx));
+ memcpy(out + sizeof(struct caam_hash_ctx), state,
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(struct caam_hash_ctx));
+ memcpy(state, in + sizeof(struct caam_hash_ctx),
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+ u32 alg_op;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *ctrldev;
+ int alg_type;
+ int alg_op;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ int tgt_jr = atomic_inc_return(&priv->tfm_count);
+ int ret = 0;
+
+ /*
+ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
+
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+
+ ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ ret = ahash_set_sh_desc(ahash);
+
+ return ret;
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sh_desc_update_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(ctx->sh_desc_update),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_update_first_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(ctx->sh_desc_update_first),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_fin_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
+ if (ctx->sh_desc_digest_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(ctx->sh_desc_digest),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_finup_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+}
+
+static struct caam_hash_alg *
+caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+ bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+ if (!t_alg) {
+ dev_err(ctrldev, "failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
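+	/* the caller registers each template twice: keyed (hmac) and unkeyed */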
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->alg_op = template->alg_op;
+ t_alg->ctrldev = ctrldev;
+
+ return t_alg;
+}
+
+int caam_algapi_hash_startup(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ int i = 0, err = 0, md_limit = 0, md_inst;
+ u64 cha_inst;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+
+ INIT_LIST_HEAD(&priv->hash_list);
+
+ atomic_set(&priv->tfm_count, -1);
+
+ /* register algorithms the device supports */
+ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
+
+ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
+ if (md_inst) {
+ md_limit = SHA512_DIGEST_SIZE;
+ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
+ == CHA_ID_MD_LP256) { /* LP256 limits digest size */
+ md_limit = SHA256_DIGEST_SIZE;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+
+ /* If no MD instantiated, or MD too small, skip */
+ if ((!md_inst) ||
+ (driver_hash[i].template_ahash.halg.digestsize >
+ md_limit))
+ continue;
+ /* register hmac version */
+ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+			dev_warn(ctrldev, "%s alg allocation failed\n",
+				 driver_hash[i].hmac_driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &priv->hash_list);
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(ctrldev, "%s alg allocation failed\n",
+ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &priv->hash_list);
+ }
+
+ return err;
+}
+
+void caam_algapi_hash_shutdown(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ struct caam_hash_alg *t_alg, *n;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+
+ if (!priv->hash_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ crypto_unregister_ahash(&t_alg->ahash_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+#ifdef CONFIG_OF
+static void __exit caam_algapi_hash_exit(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return;
+ }
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev)
+		return;
+
+	of_node_put(dev_node);
+
+	caam_algapi_hash_shutdown(pdev);
+}
+
+static int __init caam_algapi_hash_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ int err = 0;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ of_node_put(dev_node);
+
+ return caam_algapi_hash_startup(pdev);
+}
+
+module_init(caam_algapi_hash_init);
+module_exit(caam_algapi_hash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
+#endif
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 000000000000..a4b4a3871bdf
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,381 @@
+/*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of job descriptors to the shared descriptor:
+ *
+ * --------------- --------------
+ * | JobDesc #0 |-------------------->| ShareDesc |
+ * | *(buffer 0) | |------------->| (generate) |
+ * --------------- | | (move) |
+ * | | (store) |
+ * --------------- | --------------
+ * | JobDesc #1 |------|
+ * | *(buffer 1) |
+ * ---------------
+ *
+ * A job desc looks like this:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * ---------------------
+ *
+ * The SharedDesc never changes, and each job descriptor points to one of two
+ * buffers for each device, from which the data will be copied into the
+ * requested destination.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Maximum buffer size: maximum number of random, cache-aligned bytes that
+ * will be generated and moved to seq out ptr (extlen not allowed)
+ */
+#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
+ L1_CACHE_BYTES)
+
+/* length of descriptors */
+#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_RNG_LEN (10 * CAAM_CMD_SZ)
+
+/* Buffer, its DMA address, fill state and completion */
+struct buf_data {
+ u8 buf[RN_BUF_SIZE];
+ dma_addr_t addr;
+ struct completion filled;
+ u32 hw_desc[DESC_JOB_O_LEN];
+#define BUF_NOT_EMPTY 0
+#define BUF_EMPTY 1
+#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
+ atomic_t empty;
+};
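+
+/*
+ * Buffer life cycle: submit_job() moves a buffer from BUF_EMPTY to
+ * BUF_PENDING, rng_done() marks it BUF_NOT_EMPTY, and caam_read() drains
+ * it back to BUF_EMPTY while the other buffer refills.
+ */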
+
+/* rng per-device context */
+struct caam_rng_ctx {
+ struct device *jrdev;
+ dma_addr_t sh_desc_dma;
+ u32 sh_desc[DESC_RNG_LEN];
+ unsigned int cur_buf_idx;
+ int current_buf;
+ struct buf_data bufs[2];
+};
+
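+/* single static context; one hwrng instance is registered on job ring 0 */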
+static struct caam_rng_ctx rng_ctx;
+
+static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+{
+ if (bd->addr) {
+ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+
+ if (ctx->sh_desc_dma)
+ dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
+ DMA_TO_DEVICE);
+ rng_unmap_buf(jrdev, &ctx->bufs[0]);
+ rng_unmap_buf(jrdev, &ctx->bufs[1]);
+}
+
+static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+{
+ struct buf_data *bd;
+
+ bd = (struct buf_data *)((char *)desc -
+ offsetof(struct buf_data, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
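+	/*
+	 * sg_count() returns 0 when the source is a single segment, so map
+	 * at least one entry in that case
+	 */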
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ atomic_set(&bd->empty, BUF_NOT_EMPTY);
+ complete(&bd->filled);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+#endif
+}
+
+static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+{
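+	/*
+	 * to_current selects the target buffer: bufs[current_buf] when set,
+	 * the other buffer when clear
+	 */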
+ struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = bd->hw_desc;
+ int err;
+
+ dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
+ init_completion(&bd->filled);
+ err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
+ if (err)
+ complete(&bd->filled); /* don't wait on failed job*/
+ else
+ atomic_inc(&bd->empty); /* note if pending */
+
+ return err;
+}
+
+static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct caam_rng_ctx *ctx = &rng_ctx;
+ struct buf_data *bd = &ctx->bufs[ctx->current_buf];
+ int next_buf_idx, copied_idx;
+ int err;
+
+ if (atomic_read(&bd->empty)) {
+ /* try to submit job if there wasn't one */
+ if (atomic_read(&bd->empty) == BUF_EMPTY) {
+ err = submit_job(ctx, 1);
+ /* if can't submit job, can't even wait */
+ if (err)
+ return 0;
+ }
+ /* no immediate data, so exit if not waiting */
+ if (!wait)
+ return 0;
+
+ /* waiting for pending job */
+ if (atomic_read(&bd->empty))
+ wait_for_completion(&bd->filled);
+ }
+
+ next_buf_idx = ctx->cur_buf_idx + max;
+ dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
+ __func__, ctx->current_buf, ctx->cur_buf_idx);
+
+ /* if enough data in current buffer */
+ if (next_buf_idx < RN_BUF_SIZE) {
+ memcpy(data, bd->buf + ctx->cur_buf_idx, max);
+ ctx->cur_buf_idx = next_buf_idx;
+ return max;
+ }
+
+ /* else, copy what's left... */
+ copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
+ memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
+ ctx->cur_buf_idx = 0;
+ atomic_set(&bd->empty, BUF_EMPTY);
+
+ /* ...refill... */
+ submit_job(ctx, 1);
+
+ /* and use next buffer */
+ ctx->current_buf = !ctx->current_buf;
+ dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
+
+ /* since there already is some data read, don't wait */
+ return copied_idx + caam_read(rng, data + copied_idx,
+ max - copied_idx, false);
+}
+
+static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = ctx->sh_desc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+ /* Generate random bytes */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+ /* Store bytes */
+ append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+
+ ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+}
+
+static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct device *jrdev = ctx->jrdev;
+ struct buf_data *bd = &ctx->bufs[buf_id];
+ u32 *desc = bd->hw_desc;
+ int sh_len = desc_len(ctx->sh_desc);
+
+ init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
+ append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+}
+
+static void caam_cleanup(struct hwrng *rng)
+{
+ int i;
+ struct buf_data *bd;
+
+ for (i = 0; i < 2; i++) {
+ bd = &rng_ctx.bufs[i];
+ if (atomic_read(&bd->empty) == BUF_PENDING)
+ wait_for_completion(&bd->filled);
+ }
+
+ rng_unmap_ctx(&rng_ctx);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+{
+ u8 *buf;
+ int real_len;
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	real_len = rng->read(rng, buf, len, wait);
+	if (real_len == 0 && wait)
+		pr_err("WAITING FAILED\n");
+	pr_info("wanted %zu bytes, got %d\n", len, real_len);
+ print_hex_dump(KERN_INFO, "random bytes@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, buf, real_len, 1);
+ kfree(buf);
+}
+
+static inline void test_mode_once(struct hwrng *rng, bool wait)
+{
+#define TEST_CHUNK (RN_BUF_SIZE / 4)
+
+ test_len(rng, TEST_CHUNK, wait);
+ test_len(rng, RN_BUF_SIZE * 2, wait);
+ test_len(rng, RN_BUF_SIZE * 2 - TEST_CHUNK, wait);
+}
+
+static inline void test_mode(struct hwrng *rng, bool wait)
+{
+#define TEST_PASS 1
+ int i;
+
+ for (i = 0; i < TEST_PASS; i++)
+ test_mode_once(rng, wait);
+}
+
+static void self_test(struct hwrng *rng)
+{
+ pr_info("testing without waiting\n");
+ test_mode(rng, false);
+ pr_info("testing with waiting\n");
+ test_mode(rng, true);
+}
+#endif
+
+static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct buf_data *bd = &ctx->bufs[buf_id];
+
+ rng_create_job_desc(ctx, buf_id);
+ atomic_set(&bd->empty, BUF_EMPTY);
+ submit_job(ctx, buf_id == ctx->current_buf);
+ wait_for_completion(&bd->filled);
+}
+
+static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+{
+ ctx->jrdev = jrdev;
+ rng_create_sh_desc(ctx);
+ ctx->current_buf = 0;
+ ctx->cur_buf_idx = 0;
+ caam_init_buf(ctx, 0);
+ caam_init_buf(ctx, 1);
+}
+
+static struct hwrng caam_rng = {
+ .name = "rng-caam",
+ .cleanup = caam_cleanup,
+ .read = caam_read,
+};
+
+int caam_rng_startup(struct platform_device *pdev)
+{
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+
+ /* Check RNG present in hardware before registration */
+ if (!(rd_reg64(&priv->ctrl->perfmon.cha_num) & CHA_ID_RNG_MASK))
+ return -ENODEV;
+
+ caam_init_rng(&rng_ctx, priv->jrdev[0]);
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ self_test(&caam_rng);
+#endif
+
+ dev_info(priv->jrdev[0], "registering rng-caam\n");
+ return hwrng_register(&caam_rng);
+}
+
+void caam_rng_shutdown(void)
+{
+ hwrng_unregister(&caam_rng);
+}
+
+#ifdef CONFIG_OF
+static void __exit caam_rng_exit(void)
+{
+ hwrng_unregister(&caam_rng);
+}
+
+static int __init caam_rng_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+	of_node_put(dev_node);
+
+	return caam_rng_startup(pdev);
+}
+
+module_init(caam_rng_init);
+module_exit(caam_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
+#endif
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 950450346f70..28d9670ff834 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_COMPAT_H
@@ -12,7 +12,6 @@
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
-#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
@@ -22,14 +21,29 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+
+#ifdef CONFIG_OF
+#include <linux/of_platform.h>
+#else
+#include <linux/platform_device.h>
+#endif
+
+#ifdef CONFIG_ARM /* needs the clock control subsystem */
+#include <linux/clk.h>
+#include <asm/cacheflush.h>
+#endif
+
#include <net/xfrm.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 9009713a3c2e..6ea81c9d54f4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -2,13 +2,16 @@
* CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
+#include "snvsregs.h"
#include "intern.h"
#include "jr.h"
+#include "desc_constr.h"
+#include "error.h"
static int caam_remove(struct platform_device *pdev)
{
@@ -22,6 +25,28 @@ static int caam_remove(struct platform_device *pdev)
ctrlpriv = dev_get_drvdata(ctrldev);
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+#ifndef CONFIG_OF
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO
+ caam_secvio_shutdown(pdev);
+#endif /* SECVIO */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM
+ caam_sm_shutdown(pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+ if (ctrlpriv->rng_inst)
+ caam_rng_shutdown();
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+ caam_algapi_hash_shutdown(pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ caam_algapi_shutdown(pdev);
+#endif
+#endif
/* shut down JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
@@ -34,27 +59,161 @@ static int caam_remove(struct platform_device *pdev)
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
+ /* Unmap SNVS and Secure Memory */
+ iounmap(ctrlpriv->snvs);
+ iounmap(ctrlpriv->sm_base);
+
/* Unmap controller region */
iounmap(&topregs->ctrl);
+ /* shut clocks off before finalizing shutdown */
+ clk_disable(ctrlpriv->caam_clk);
+
kfree(ctrlpriv->jrdev);
kfree(ctrlpriv);
return ret;
}
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ init_job_desc(desc, 0);
+
+ /* INIT RNG in non-test mode */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AS_INIT);
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+	 * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* generate secure keys (non-test) */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_RNG4_SK);
+}
+
+struct instantiate_result {
+ struct completion completion;
+ int err;
+};
+
+static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct instantiate_result *instantiation = context;
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ instantiation->err = err;
+ complete(&instantiation->completion);
+}
+
+static int instantiate_rng(struct device *jrdev)
+{
+ struct instantiate_result instantiation;
+
+ dma_addr_t desc_dma;
+ u32 *desc;
+ int ret;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+ return -ENOMEM;
+ }
+
+ build_instantiation_desc(desc);
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, desc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ init_completion(&instantiation.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
+ if (!ret) {
+ wait_for_completion_interruptible(&instantiation.completion);
+ ret = instantiation.err;
+ if (ret)
+ dev_err(jrdev, "unable to instantiate RNG\n");
+ }
+
+ dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * By default, the TRNG runs for 200 clocks per sample;
+ * 1600 clocks per sample generates better entropy.
+ */
+static void kick_trng(struct platform_device *pdev)
+{
+ struct device *ctrldev = &pdev->dev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 val;
+
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
+
+ /* put RNG4 into program mode */
+ setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ /* Set clocks per sample to the default, and divider to zero */
+ val = rd_reg32(&r4tst->rtsdctl);
+ val = ((val & ~RTSDCTL_ENT_DLY_MASK) |
+ (RNG4_ENT_CLOCKS_SAMPLE << RTSDCTL_ENT_DLY_SHIFT)) &
+ ~RTMCTL_OSC_DIV_MASK;
+ wr_reg32(&r4tst->rtsdctl, val);
+ /* min. freq. count */
+ wr_reg32(&r4tst->rtfrqmin, RNG4_ENT_CLOCKS_SAMPLE / 4);
+ /* max. freq. count */
+ wr_reg32(&r4tst->rtfrqmax, RNG4_ENT_CLOCKS_SAMPLE * 8);
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int d, ring, rspec;
+ int d, ring, rspec, status;
struct device *dev;
- struct device_node *nprop, *np;
+ struct device_node *np;
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
+ struct snvs_full __iomem *snvsregs;
+ void __iomem *sm_base;
struct caam_drv_private *ctrlpriv;
- struct caam_perfmon *perfmon;
- struct caam_deco **deco;
u32 deconum;
+#ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+#endif
+#ifdef CONFIG_OF
+ struct device_node *nprop;
+#else
+ struct resource *res;
+ char *rname, inst;
+#endif
+#ifdef CONFIG_ARM
+ int ret = 0;
+#endif
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
@@ -63,22 +222,121 @@ static int caam_probe(struct platform_device *pdev)
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
ctrlpriv->pdev = pdev;
- nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
+#ifdef CONFIG_OF
+ nprop = pdev->dev.of_node;
ctrl = of_iomap(nprop, 0);
if (ctrl == NULL) {
dev_err(dev, "caam: of_iomap() failed\n");
return -ENOMEM;
}
+#else
+ /* Get the named resource for the controller base address */
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "iobase_caam");
+ if (!res) {
+ dev_err(dev, "caam: invalid address resource type\n");
+ return -ENODEV;
+ }
+	ctrl = ioremap(res->start, res->end - res->start + 1);
+ if (ctrl == NULL) {
+ dev_err(dev, "caam: ioremap() failed\n");
+ return -ENOMEM;
+ }
+#endif
+
ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
/* topregs used to derive pointers to CAAM sub-blocks only */
topregs = (struct caam_full __iomem *)ctrl;
- /* Get the IRQ of the controller (for security violations only) */
+ /*
+ * Next, map SNVS register page
+ * FIXME: MX6 has a separate RTC driver using SNVS. This driver
+ * will have a mapped pointer to SNVS registers also, which poses
+ * a conflict if we're not very careful to stay away from registers
+ * and interrupts that it uses. In the future, pieces of that driver
+ * may need to migrate down here. In the meantime, use caution with
+ * this pointer. Also note that the snvs-rtc driver probably controls
+ * SNVS device clocks.
+ */
+#ifdef CONFIG_OF
+ /* Get SNVS register page */
+#else
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_snvs");
+ if (!res) {
+ dev_err(dev, "snvs: invalid address resource type\n");
+ return -ENODEV;
+ }
+ snvsregs = ioremap(res->start, res->end - res->start + 1);
+ if (snvsregs == NULL) {
+ dev_err(dev, "snvs: ioremap() failed\n");
+ iounmap(ctrl);
+ return -ENOMEM;
+ }
+#endif
+ ctrlpriv->snvs = (struct snvs_full __force *)snvsregs;
+
+ /* Now map CAAM-Secure Memory Space */
+#ifdef CONFIG_OF
+ /* Get CAAM-SM node and of_iomap() and save */
+#else
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "iobase_caam_sm");
+ if (!res) {
+ dev_err(dev, "caam_sm: invalid address resource type\n");
+ return -ENODEV;
+ }
+ sm_base = ioremap_nocache(res->start, res->end - res->start + 1);
+ if (sm_base == NULL) {
+ dev_err(dev, "caam_sm: ioremap_nocache() failed\n");
+ iounmap(ctrl);
+ iounmap(snvsregs);
+ return -ENOMEM;
+ }
+#endif
+ ctrlpriv->sm_base = (void __force *)sm_base;
+ ctrlpriv->sm_size = res->end - res->start + 1;
+
+ /*
+ * Get the IRQ for security violations
+ */
+#ifdef CONFIG_OF
ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+#else
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "irq_sec_vio");
+ if (!res) {
+ dev_err(dev, "caam: invalid IRQ resource type\n");
+ return -ENODEV;
+ }
+ ctrlpriv->secvio_irq = res->start;
+#endif
+
+/*
+ * ARM targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device. Turn clocking on to proceed
+ */
+#ifdef CONFIG_ARM
+ ctrlpriv->caam_clk = clk_get(&ctrlpriv->pdev->dev, "caam_clk");
+ if (IS_ERR(ctrlpriv->caam_clk)) {
+ ret = PTR_ERR(ctrlpriv->caam_clk);
+ dev_err(&ctrlpriv->pdev->dev,
+ "can't identify CAAM bus clk: %d\n", ret);
+ return -ENODEV;
+ }
+ ret = clk_enable(ctrlpriv->caam_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM bus clock: %d\n", ret);
+ return -ENODEV;
+ }
+
+	pr_debug("%s caam_clk:%lu\n", __func__,
+		 clk_get_rate(ctrlpriv->caam_clk));
+#endif
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -87,8 +345,26 @@ static int caam_probe(struct platform_device *pdev)
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+#if (defined(CONFIG_ARCH_MX6) || defined(CONFIG_ARCH_MVF))
+ /*
+ * ERRATA: mx6 devices have an issue wherein AXI bus transactions
+ * may not occur in the correct order. This isn't a problem running
+ * single descriptors, but can be if running multiple concurrent
+ * descriptors. Reworking the driver to throttle to single requests
+ * is impractical, thus the workaround is to limit the AXI pipeline
+	 * to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+ wr_reg32(&topregs->ctrl.mcr,
+ (rd_reg32(&topregs->ctrl.mcr) & ~(MCFGR_AXIPIPE_MASK)) |
+ ((1 << MCFGR_AXIPIPE_SHIFT) & MCFGR_AXIPIPE_MASK));
+#endif
+
+ /* Set DMA masks according to platform ranging */
if (sizeof(dma_addr_t) == sizeof(u64))
dma_set_mask(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask(dev, DMA_BIT_MASK(32));
/* Find out how many DECOs are present */
deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
@@ -97,18 +373,41 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
GFP_KERNEL);
- deco = (struct caam_deco __force **)&topregs->deco;
- for (d = 0; d < deconum; d++)
- ctrlpriv->deco[d] = deco[d];
-
/*
* Detect and enable JobRs
	 * First, find out how many rings are spec'ed, allocate references
* for all, then go probe each one.
*/
rspec = 0;
+#ifdef CONFIG_OF
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
rspec++;
+#else
+ np = NULL;
+
+	/* Build the name of the IRQ platform resources to identify */
+	/* Room for the root name, one instance digit and the trailing NUL */
+	rname = kzalloc(strlen(JR_IRQRES_NAME_ROOT) + 2, GFP_KERNEL);
+ if (rname == NULL) {
+ iounmap(&topregs->ctrl);
+ return -ENOMEM;
+ }
+
+ /*
+	 * Emulate the behavior of for_each_compatible_node() for non-OF
+	 * targets: identify all IRQ platform resources present.
+ */
+ for (d = 0; d < 4; d++) {
+ rname[0] = 0;
+ inst = '0' + d;
+ strcat(rname, JR_IRQRES_NAME_ROOT);
+ strncat(rname, &inst, 1);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, rname);
+ if (res)
+ rspec++;
+ }
+ kfree(rname);
+#endif
ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
if (ctrlpriv->jrdev == NULL) {
iounmap(&topregs->ctrl);
@@ -117,7 +416,11 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
+#ifdef CONFIG_OF
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
+#else
+ for (d = 0; d < rspec; d++) {
+#endif
caam_jr_probe(pdev, np, ring);
ctrlpriv->total_jobrs++;
ring++;
@@ -139,6 +442,24 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /*
+	 * RNG4-based SECs (v5+, e.g. i.MX6) need special initialization prior
+ * to executing any descriptors. If there's a problem with init,
+ * remove other subsystems and return; internal padding functions
+ * cannot run without an RNG. This procedure assumes a single RNG4
+ * instance.
+ */
+ if ((rd_reg64(&topregs->ctrl.perfmon.cha_id) & CHA_ID_RNG_MASK)
+ == CHA_ID_RNG_4) {
+ kick_trng(pdev);
+ ret = instantiate_rng(ctrlpriv->jrdev[0]);
+ if (ret) {
+ caam_remove(pdev);
+ return -ENODEV;
+ }
+ ctrlpriv->rng_inst++;
+ }
+
/* NOTE: RTIC detection ought to go here, around Si time */
/* Initialize queue allocator lock */
@@ -164,52 +485,52 @@ static int caam_probe(struct platform_device *pdev)
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_u64("rq_dequeued",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->req_dequeued);
ctrlpriv->ctl_ob_enc_req =
debugfs_create_u64("ob_rq_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_req);
ctrlpriv->ctl_ib_dec_req =
debugfs_create_u64("ib_rq_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_req);
ctrlpriv->ctl_ob_enc_bytes =
debugfs_create_u64("ob_bytes_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_bytes);
ctrlpriv->ctl_ob_prot_bytes =
debugfs_create_u64("ob_bytes_protected",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_prot_bytes);
ctrlpriv->ctl_ib_dec_bytes =
debugfs_create_u64("ib_bytes_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_bytes);
ctrlpriv->ctl_ib_valid_bytes =
debugfs_create_u64("ib_bytes_validated",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_valid_bytes);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
debugfs_create_u64("fault_addr",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultaddr);
ctrlpriv->ctl_faultdetail =
debugfs_create_u32("fault_detail",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultdetail);
ctrlpriv->ctl_faultstatus =
debugfs_create_u32("fault_status",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->status);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
@@ -217,7 +538,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
@@ -225,14 +546,60 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
- return 0;
+
+/*
+ * Non-OF configurations use platform_device, and therefore cannot simply
+ * look up a device node by name, which the algapi module startup code
+ * assumes is possible. Therefore, non-OF configurations have to start
+ * up the API code explicitly and forgo modularization.
+ */
+#ifndef CONFIG_OF
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ status = caam_algapi_startup(pdev);
+ if (status) {
+ caam_remove(pdev);
+ return status;
+ }
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+ status = caam_algapi_hash_startup(pdev);
+ if (status) {
+ caam_remove(pdev);
+ return status;
+ }
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+ if (ctrlpriv->rng_inst)
+ caam_rng_startup(pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM
+ status = caam_sm_startup(pdev);
+ if (status) {
+ caam_remove(pdev);
+ return status;
+ }
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST
+ caam_sm_example_init(pdev);
+#endif /* SM_TEST */
+#endif /* SM */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO
+ caam_secvio_startup(pdev);
+#endif /* SECVIO */
+
+#endif /* CONFIG_OF */
+ return status;
}
+#ifdef CONFIG_OF
static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -240,12 +607,17 @@ static struct of_device_id caam_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
+#endif /* CONFIG_OF */
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
.of_match_table = caam_match,
+#endif
},
.probe = caam_probe,
.remove = __devexit_p(caam_remove),
@@ -253,12 +625,20 @@ static struct platform_driver caam_driver = {
static int __init caam_base_init(void)
{
+#ifdef CONFIG_OF
+ return of_register_platform_driver(&caam_driver);
+#else
return platform_driver_register(&caam_driver);
+#endif
}
static void __exit caam_base_exit(void)
{
+#ifdef CONFIG_OF
+ return of_unregister_platform_driver(&caam_driver);
+#else
return platform_driver_unregister(&caam_driver);
+#endif
}
module_init(caam_base_init);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 974a75842da9..573d9d1c3f5d 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -2,14 +2,39 @@
* CAAM descriptor composition header
* Definitions to support CAAM descriptor instruction generation
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*/
#ifndef DESC_H
#define DESC_H
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL compile time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN		0x40000000	/* Last entry in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK 0x00001fff
+
+struct sec4_sg_entry {
+#ifdef CONFIG_64BIT
+ u64 ptr;
+#else
+ u32 reserved;
+ u32 ptr;
+#endif
+ u32 len;
+ u32 bpid_offset; /* BPID in high, offset in lowest bits */
+};
+
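+/*
+ * Usage sketch (illustrative only, not part of this patch): populating
+ * one hardware S/G entry from a DMA address/length pair. The helper
+ * name and its 'last' argument convention are hypothetical.
+ */
+static inline void sec4_sg_fill_one(struct sec4_sg_entry *entry,
+				    dma_addr_t dma, u32 len, u16 offset,
+				    bool last)
+{
+	entry->ptr = dma;
+	entry->len = (len & SEC4_SG_LEN_MASK) |
+		     (last ? SEC4_SG_LEN_FIN : 0);
+	/* BPID sits in the high bits, the byte offset in the low bits */
+	entry->bpid_offset = offset & SEC4_SG_OFFS_MASK;
+}
+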
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
-#define MAX_CAAM_DESCSIZE 64
+#define MAX_CAAM_DESCSIZE 64
/* Block size of any entity covered/uncovered with a KEK/TKEK */
#define KEK_BLOCKSIZE 16
@@ -18,38 +43,38 @@
* Supported descriptor command types as they show up
* inside a descriptor command word.
*/
-#define CMD_SHIFT 27
-#define CMD_MASK 0xf8000000
-
-#define CMD_KEY (0x00 << CMD_SHIFT)
-#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
-#define CMD_LOAD (0x02 << CMD_SHIFT)
-#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
-#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
-#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
-#define CMD_STORE (0x0a << CMD_SHIFT)
-#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
-#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
-#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
-#define CMD_MOVE (0x0f << CMD_SHIFT)
-#define CMD_OPERATION (0x10 << CMD_SHIFT)
-#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
-#define CMD_JUMP (0x14 << CMD_SHIFT)
-#define CMD_MATH (0x15 << CMD_SHIFT)
-#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
-#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
-#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
-#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
+#define CMD_SHIFT 27
+#define CMD_MASK 0xf8000000
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
+#define CMD_JUMP (0x14 << CMD_SHIFT)
+#define CMD_MATH (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
/* General-purpose class selector for all commands */
-#define CLASS_SHIFT 25
-#define CLASS_MASK (0x03 << CLASS_SHIFT)
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
-#define CLASS_NONE (0x00 << CLASS_SHIFT)
-#define CLASS_1 (0x01 << CLASS_SHIFT)
-#define CLASS_2 (0x02 << CLASS_SHIFT)
-#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
/*
* Descriptor header command constructs
@@ -60,82 +85,82 @@
* Do Not Run - marks a descriptor inexecutable if there was
* a preceding error somewhere
*/
-#define HDR_DNR 0x01000000
+#define HDR_DNR 0x01000000
/*
* ONE - should always be set. Combination of ONE (always
* set) and ZRO (always clear) forms an endianness sanity check
*/
-#define HDR_ONE 0x00800000
-#define HDR_ZRO 0x00008000
+#define HDR_ONE 0x00800000
+#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
-#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK 0x3f
+#define HDR_START_IDX_SHIFT 16
/* If shared descriptor header, 6-bit length */
-#define HDR_DESCLEN_SHR_MASK 0x3f
+#define HDR_DESCLEN_SHR_MASK 0x3f
/* If non-shared header, 7-bit length */
-#define HDR_DESCLEN_MASK 0x7f
+#define HDR_DESCLEN_MASK 0x7f
/* This is a TrustedDesc (if not SharedDesc) */
-#define HDR_TRUSTED 0x00004000
+#define HDR_TRUSTED 0x00004000
/* Make into TrustedDesc (if not SharedDesc) */
-#define HDR_MAKE_TRUSTED 0x00002000
+#define HDR_MAKE_TRUSTED 0x00002000
/* Save context if self-shared (if SharedDesc) */
-#define HDR_SAVECTX 0x00001000
+#define HDR_SAVECTX 0x00001000
/* Next item points to SharedDesc */
-#define HDR_SHARED 0x00001000
+#define HDR_SHARED 0x00001000
/*
* Reverse Execution Order - execute JobDesc first, then
* execute SharedDesc (normally SharedDesc goes first).
*/
-#define HDR_REVERSE 0x00000800
+#define HDR_REVERSE 0x00000800
/* Propagate DNR property to SharedDesc */
-#define HDR_PROP_DNR 0x00000800
+#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
-#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
-#define HDR_JD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK 0x03
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK 0x07
+#define HDR_JD_SHARE_SHIFT 8
-#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
/* JobDesc/SharedDesc descriptor length */
-#define HDR_JD_LENGTH_MASK 0x7f
-#define HDR_SD_LENGTH_MASK 0x3f
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
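+
+/*
+ * Usage sketch (illustrative only): a shared-descriptor header word
+ * combines the header command, the mandatory ONE bit, a start index,
+ * a share policy and the total descriptor length in words, e.g.:
+ *
+ *	hdr = CMD_SHARED_DESC_HDR | HDR_ONE | HDR_SHARE_SERIAL |
+ *	      ((1 & HDR_START_IDX_MASK) << HDR_START_IDX_SHIFT) |
+ *	      (desclen & HDR_SD_LENGTH_MASK);
+ */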
/*
* KEY/SEQ_KEY Command Constructs
*/
-/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
-#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
-#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define KEY_SGF 0x01000000
-#define KEY_VLF 0x01000000
+#define KEY_SGF 0x01000000
+#define KEY_VLF 0x01000000
/* Immediate - Key follows command in the descriptor */
-#define KEY_IMM 0x00800000
+#define KEY_IMM 0x00800000
/*
* Encrypted - Key is encrypted either with the KEK, or
* with the TDKEK if TK is set
*/
-#define KEY_ENC 0x00400000
+#define KEY_ENC 0x00400000
/*
* No Write Back - Do not allow key to be FIFO STOREd
@@ -156,16 +181,16 @@
* KDEST - Key Destination: 0 - class key register,
* 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
*/
-#define KEY_DEST_SHIFT 16
-#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
-#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
-#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
-#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
-#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
/* Length in bytes */
-#define KEY_LENGTH_MASK 0x000003ff
+#define KEY_LENGTH_MASK 0x000003ff
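+
+/*
+ * Usage sketch (illustrative only): an immediate class 2 KEY command
+ * that loads an MDHA split key of 'keylen' bytes would be encoded as:
+ *
+ *	key_cmd = CMD_KEY | CLASS_2 | KEY_IMM | KEY_DEST_MDHA_SPLIT |
+ *		  (keylen & KEY_LENGTH_MASK);
+ */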
/*
* LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
@@ -175,25 +200,25 @@
* Load/Store Destination: 0 = class independent CCB,
* 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
*/
-#define LDST_CLASS_SHIFT 25
-#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define LDST_SGF 0x01000000
+#define LDST_SGF 0x01000000
#define LDST_VLF LDST_SGF
-/* Immediate - Key follows this command in descriptor */
-#define LDST_IMM_MASK 1
-#define LDST_IMM_SHIFT 23
-#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
-/* SRC/DST - Destination for LOAD, Source for STORE */
-#define LDST_SRCDST_SHIFT 16
-#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
@@ -205,64 +230,64 @@
#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
-
-/* Offset in source/destination */
-#define LDST_OFFSET_SHIFT 8
-#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
-#define LDOFF_CHG_SHARE_SHIFT 0
-#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
-
-#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
-#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
-
-#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
-#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-
-#define LDOFF_CHG_SEQLIODN_SHIFT 6
-#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-
-/* Data length in bytes */
-#define LDST_LEN_SHIFT 0
-#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
/* Special Length definitions when dst=deco-ctrl */
-#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
-#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
-#define LDLEN_RST_OFIFO (1 << 5)
-#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
-#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
-#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
-#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
+#define LDLEN_RST_OFIFO (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
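+
+/*
+ * Usage sketch (illustrative only): a LOAD of 'len' immediate bytes
+ * into the class 1 context register at byte offset 'off' would be
+ * encoded as:
+ *
+ *	ld_cmd = CMD_LOAD | LDST_CLASS_1_CCB | LDST_IMM |
+ *		 LDST_SRCDST_BYTE_CONTEXT |
+ *		 ((off << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
+ *		 (len & LDST_LEN_MASK);
+ */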
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
@@ -274,808 +299,825 @@
* 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
* Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
*/
-#define FIFOLD_CLASS_SHIFT 25
-#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
-
-#define FIFOST_CLASS_SHIFT 25
-#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
/*
* Scatter-Gather Table/Variable Length Field
* If set for FIFO_LOAD, refers to a SG table. Within
* SEQ_FIFO_LOAD, is variable input sequence
*/
-#define FIFOLDST_SGF_SHIFT 24
-#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
/* Immediate - Data follows command in descriptor */
-#define FIFOLD_IMM_SHIFT 23
-#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
-#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
/* Continue - Not the last FIFO store to come */
-#define FIFOST_CONT_SHIFT 23
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
/*
* Extended Length - use 32-bit extended length that
* follows the pointer field. Illegal with IMM set
*/
-#define FIFOLDST_EXT_SHIFT 22
-#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
-#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
/* Input data type */
-#define FIFOLD_TYPE_SHIFT 16
-#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
-#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
/* PK types */
-#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
/* Other types. Need to OR in last/flush bits as desired */
-#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
/* Last/Flush bits for use with "other" types above */
-#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLDST_LEN_MASK 0xffff
-#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
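+
+/*
+ * Usage sketch (illustrative only): a FIFO LOAD of 'len' bytes of
+ * message data into class 1, marked as the last class 1 data, would
+ * be encoded as:
+ *
+ *	fl_cmd = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
+ *		 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 |
+ *		 (len & FIFOLDST_LEN_MASK);
+ */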
/* Output data types */
-#define FIFOST_TYPE_SHIFT 16
-#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
-
-#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
* OPERATION Command Constructs
*/
/* Operation type selectors - OP TYPE */
-#define OP_TYPE_SHIFT 24
-#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
-#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
-#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
-#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
-#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
/* ProtocolID selectors - PROTID */
-#define OP_PCLID_SHIFT 16
-#define OP_PCLID_MASK (0xff << 16)
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK			(0xff << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
-#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
-#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
-#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
-#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
-#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
-#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
/*
* ProtocolInfo selectors
*/
-#define OP_PCLINFO_MASK 0xffff
+#define OP_PCLINFO_MASK 0xffff
/* for OP_PCLID_IPSEC */
-#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
-#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
-
-#define OP_PCL_IPSEC_DES_IV64 0x0100
-#define OP_PCL_IPSEC_DES 0x0200
-#define OP_PCL_IPSEC_3DES 0x0300
-#define OP_PCL_IPSEC_AES_CBC 0x0c00
-#define OP_PCL_IPSEC_AES_CTR 0x0d00
-#define OP_PCL_IPSEC_AES_XTS 0x1600
-#define OP_PCL_IPSEC_AES_CCM8 0x0e00
-#define OP_PCL_IPSEC_AES_CCM12 0x0f00
-#define OP_PCL_IPSEC_AES_CCM16 0x1000
-#define OP_PCL_IPSEC_AES_GCM8 0x1200
-#define OP_PCL_IPSEC_AES_GCM12 0x1300
-#define OP_PCL_IPSEC_AES_GCM16 0x1400
-
-#define OP_PCL_IPSEC_HMAC_NULL 0x0000
-#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
-#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
-#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
-#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
-#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
-#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
-#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
-#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
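+
+/*
+ * Usage sketch (illustrative only): a protocol OPERATION combines an
+ * operation type, a protocol ID and protocol info; e.g. IPsec ESP
+ * encapsulation with AES-CBC and HMAC-SHA1-96:
+ *
+ *	op_cmd = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+ *		 OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96;
+ */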
/* For SRTP - OP_PCLID_SRTP */
-#define OP_PCL_SRTP_CIPHER_MASK 0xff00
-#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
-#define OP_PCL_SRTP_AES_CTR 0x0d00
+#define OP_PCL_SRTP_AES_CTR 0x0d00
-#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
/* For DTLS - OP_PCLID_DTLS */
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
/* 802.16 WiMAX protinfos */
-#define OP_PCL_WIMAX_OFDM 0x0201
-#define OP_PCL_WIMAX_OFDMA 0x0231
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
/* 802.11 WiFi protinfos */
-#define OP_PCL_WIFI 0xac04
+#define OP_PCL_WIFI 0xac04
/* MacSec protinfos */
-#define OP_PCL_MACSEC 0x0001
+#define OP_PCL_MACSEC 0x0001
/* PKI unidirectional protocol protinfo bits */
-#define OP_PCL_PKPROT_TEST 0x0008
-#define OP_PCL_PKPROT_DECRYPT 0x0004
-#define OP_PCL_PKPROT_ECC 0x0002
-#define OP_PCL_PKPROT_F2M 0x0001
+#define OP_PCL_PKPROT_TEST 0x0008
+#define OP_PCL_PKPROT_DECRYPT 0x0004
+#define OP_PCL_PKPROT_ECC 0x0002
+#define OP_PCL_PKPROT_F2M 0x0001
+
+/* Blob protocol protinfo bits */
+#define OP_PCL_BLOB_TK 0x0200
+#define OP_PCL_BLOB_EKT 0x0100
+
+#define OP_PCL_BLOB_K2KR_MEM 0x0000
+#define OP_PCL_BLOB_K2KR_C1KR 0x0010
+#define OP_PCL_BLOB_K2KR_C2KR 0x0030
+#define OP_PCL_BLOB_K2KR_AFHAS 0x0050
+#define OP_PCL_BLOB_K2KR_C2KR_SPLIT 0x0070
+
+#define OP_PCL_BLOB_PTXT_SECMEM 0x0008
+#define OP_PCL_BLOB_BLACK 0x0004
+
+#define OP_PCL_BLOB_FMT_NORMAL 0x0000
+#define OP_PCL_BLOB_FMT_MSTR 0x0002
+#define OP_PCL_BLOB_FMT_TEST 0x0003
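+
+/*
+ * Illustrative only: the blob protinfo bits above are OR'd together in
+ * the PROTOCOL OPERATION command word, e.g. a black-key blob built from
+ * general memory in the normal format would combine
+ * OP_PCL_BLOB_BLACK | OP_PCL_BLOB_K2KR_MEM | OP_PCL_BLOB_FMT_NORMAL.
+ */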
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
@@ -1162,6 +1204,11 @@
#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+/* RNG4 set */
+#define OP_ALG_RNG4_SHIFT 4
+#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
+
+#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
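+
+/*
+ * e.g. a descriptor that instantiates RNG4 and generates its secure key
+ * would typically OR this bit into the ALGORITHM OPERATION command:
+ * OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | OP_ALG_RNG4_SK
+ */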
#define OP_ALG_AS_SHIFT 2
#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
@@ -1181,114 +1228,114 @@
#define OP_ALG_ENCRYPT 1
/* PKHA algorithm type set */
-#define OP_ALG_PK 0x00800000
-#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
/* PKHA mode clear memory functions */
-#define OP_ALG_PKMODE_A_RAM 0x80000
-#define OP_ALG_PKMODE_B_RAM 0x40000
-#define OP_ALG_PKMODE_E_RAM 0x20000
-#define OP_ALG_PKMODE_N_RAM 0x10000
-#define OP_ALG_PKMODE_CLEARMEM 0x00001
+#define OP_ALG_PKMODE_A_RAM 0x80000
+#define OP_ALG_PKMODE_B_RAM 0x40000
+#define OP_ALG_PKMODE_E_RAM 0x20000
+#define OP_ALG_PKMODE_N_RAM 0x10000
+#define OP_ALG_PKMODE_CLEARMEM 0x00001
/* PKHA mode modular-arithmetic functions */
-#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
-#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
-#define OP_ALG_PKMODE_MOD_F2M 0x20000
-#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
-#define OP_ALG_PKMODE_PRJECTV 0x00800
-#define OP_ALG_PKMODE_TIME_EQ 0x400
-#define OP_ALG_PKMODE_OUT_B 0x000
-#define OP_ALG_PKMODE_OUT_A 0x100
-#define OP_ALG_PKMODE_MOD_ADD 0x002
-#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
-#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
-#define OP_ALG_PKMODE_MOD_MULT 0x005
-#define OP_ALG_PKMODE_MOD_EXPO 0x006
-#define OP_ALG_PKMODE_MOD_REDUCT 0x007
-#define OP_ALG_PKMODE_MOD_INV 0x008
-#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
-#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
-#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
-#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
-#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
-#define OP_ALG_PKMODE_MOD_GCD 0x00e
-#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
+#define OP_ALG_PKMODE_MOD_F2M 0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
+#define OP_ALG_PKMODE_PRJECTV 0x00800
+#define OP_ALG_PKMODE_TIME_EQ 0x400
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
-#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_SHIFT 10
-#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
-#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
-#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-
-#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
-#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
/*
* SEQ_IN_PTR Command Constructs
*/
/* Release Buffers */
-#define SQIN_RBS 0x04000000
+#define SQIN_RBS 0x04000000
/* Sequence pointer is really a descriptor */
-#define SQIN_INL 0x02000000
+#define SQIN_INL 0x02000000
/* Sequence pointer is a scatter-gather table */
-#define SQIN_SGF 0x01000000
+#define SQIN_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQIN_PRE 0x00800000
+#define SQIN_PRE 0x00800000
/* Use extended length following pointer */
-#define SQIN_EXT 0x00400000
+#define SQIN_EXT 0x00400000
/* Restore sequence with pointer/length */
-#define SQIN_RTO 0x00200000
+#define SQIN_RTO 0x00200000
/* Replace job descriptor */
-#define SQIN_RJD 0x00100000
+#define SQIN_RJD 0x00100000
-#define SQIN_LEN_SHIFT 0
-#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
/*
* SEQ_OUT_PTR Command Constructs
*/
/* Sequence pointer is a scatter-gather table */
-#define SQOUT_SGF 0x01000000
+#define SQOUT_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQOUT_PRE 0x00800000
+#define SQOUT_PRE 0x00800000
/* Restore sequence with pointer/length */
-#define SQOUT_RTO 0x00200000
+#define SQOUT_RTO 0x00200000
/* Use extended length following pointer */
-#define SQOUT_EXT 0x00400000
+#define SQOUT_EXT 0x00400000
-#define SQOUT_LEN_SHIFT 0
-#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
/*
@@ -1296,196 +1343,196 @@
*/
/* TYPE field is all that's relevant */
-#define SIGN_TYPE_SHIFT 16
-#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
/*
* MOVE Command Constructs
*/
-#define MOVE_AUX_SHIFT 25
-#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
-
-#define MOVE_WAITCOMP_SHIFT 24
-#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
-#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
-
-#define MOVE_SRC_SHIFT 20
-#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
-
-#define MOVE_DEST_SHIFT 16
-#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
-
-#define MOVE_OFFSET_SHIFT 8
-#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
-
-#define MOVE_LEN_SHIFT 0
-#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
-
-#define MOVELEN_MRSEL_SHIFT 0
-#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
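+
+/*
+ * These fields compose a MOVE command word, e.g. copying eight bytes
+ * from MATH0 into the descriptor buffer and waiting for completion:
+ * MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF | MOVE_WAITCOMP |
+ * (8 << MOVE_LEN_SHIFT)
+ */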
/*
* MATH Command Constructs
*/
-#define MATH_IFB_SHIFT 26
-#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
-#define MATH_IFB (1 << MATH_IFB_SHIFT)
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB (1 << MATH_IFB_SHIFT)
-#define MATH_NFU_SHIFT 25
-#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
-#define MATH_NFU (1 << MATH_NFU_SHIFT)
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU (1 << MATH_NFU_SHIFT)
-#define MATH_STL_SHIFT 24
-#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
-#define MATH_STL (1 << MATH_STL_SHIFT)
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL (1 << MATH_STL_SHIFT)
/* Function selectors */
-#define MATH_FUN_SHIFT 20
-#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
-#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
-#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
-#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
-#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
-#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
-#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
-#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
-#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
-#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
/* Source 0 selectors */
-#define MATH_SRC0_SHIFT 16
-#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
-#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
/* Source 1 selectors */
-#define MATH_SRC1_SHIFT 12
-#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
-#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
-#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_SHIFT 12
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
/* Destination selectors */
-#define MATH_DEST_SHIFT 8
-#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
-#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
-#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_SHIFT 8
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
/* Length selectors */
-#define MATH_LEN_SHIFT 0
-#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
-#define MATH_LEN_1BYTE 0x01
-#define MATH_LEN_2BYTE 0x02
-#define MATH_LEN_4BYTE 0x04
-#define MATH_LEN_8BYTE 0x08
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
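+
+/*
+ * A MATH command word ORs one selector from each group, e.g. a 4-byte
+ * "VARSEQINLEN = SEQINLEN - imm" operation would be encoded as:
+ * MATH_FUN_SUB | MATH_SRC0_SEQINLEN | MATH_SRC1_IMM |
+ * MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE
+ */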
/*
* JUMP Command Constructs
*/
-#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_SHIFT 25
#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_NONE 0
#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
-#define JUMP_JSL_SHIFT 24
-#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
-#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
-#define JUMP_TYPE_SHIFT 22
-#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_SHIFT 22
+#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TEST_SHIFT 16
-#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
/* Condition codes. JSL bit is factored in */
-#define JUMP_COND_SHIFT 8
-#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
-
-#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
-
-#define JUMP_OFFSET_SHIFT 0
-#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
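+
+/*
+ * e.g. a local jump taken while the shared descriptor is still loading
+ * (the JSL bit is already folded into JUMP_COND_SHRD):
+ * JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_SHRD |
+ * (offset << JUMP_OFFSET_SHIFT)
+ */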
/*
* NFIFO ENTRY
@@ -1500,20 +1547,20 @@
#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
#define NFIFOENTRY_LC2_SHIFT 29
-#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
-#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
#define NFIFOENTRY_LC1_SHIFT 28
-#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
-#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
#define NFIFOENTRY_FC2_SHIFT 27
-#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
-#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
#define NFIFOENTRY_FC1_SHIFT 26
-#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
-#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
#define NFIFOENTRY_STYPE_SHIFT 24
#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
@@ -1525,60 +1572,59 @@
#define NFIFOENTRY_DTYPE_SHIFT 20
#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
-
-#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_BND_SHIFT 19
-#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
-#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
#define NFIFOENTRY_PTYPE_SHIFT 16
#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_OC_SHIFT 15
-#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
#define NFIFOENTRY_AST_SHIFT 14
-#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST		(1 << NFIFOENTRY_AST_SHIFT)
-#define NFIFOENTRY_BM_SHIFT 11
-#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
-#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
-
-#define NFIFOENTRY_PS_SHIFT 10
-#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
-#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
#define NFIFOENTRY_DLEN_SHIFT 0
#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
@@ -1586,20 +1632,4 @@
#define NFIFOENTRY_PLEN_SHIFT 0
#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
-/*
- * PDB internal definitions
- */
-
-/* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
-#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
-#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
-#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
-#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
-#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
-#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
-#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
-
#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 46915800c26f..0393c98f5b9c 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -1,7 +1,7 @@
/*
* caam descriptor construction helper functions
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "desc.h"
@@ -9,7 +9,8 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
-#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
+#define CAAM_EXTLEN_SZ sizeof(u32)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -18,6 +19,10 @@
#define PRINT_POS
#endif
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_NO_PROP << \
+ LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
@@ -47,7 +52,7 @@ static inline void *sh_desc_pdb(u32 *desc)
static inline void init_desc(u32 *desc, u32 options)
{
- *desc = options | HDR_ONE | 1;
+ *desc = (options | HDR_ONE) + 1;
}
static inline void init_sh_desc(u32 *desc, u32 options)
@@ -58,9 +63,9 @@ static inline void init_sh_desc(u32 *desc, u32 options)
static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
{
- u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
- init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
+ init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
options);
}
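
[Reviewer note] The two hunks above fix an off-by-one in the shared-descriptor header math: the old code reserved pdb_bytes / CAAM_CMD_SZ + 1 words even when the PDB already filled whole command words, and the start index did not account for the header word itself. A minimal standalone sketch of the corrected arithmetic (assuming CAAM_CMD_SZ == 4, as in the driver):

	#include <stdio.h>

	#define CAAM_CMD_SZ 4	/* one 32-bit descriptor command word */

	int main(void)
	{
		unsigned int pdb_bytes = 20;	/* example PDB size */
		/* round up to whole words; no spare word for exact fits */
		unsigned int pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
		/* execution starts after the header word plus the PDB */
		unsigned int start_idx = pdb_len + 1;

		/* prints: pdb_len=5 start_idx=6 */
		printf("pdb_len=%u start_idx=%u\n", pdb_len, start_idx);
		return 0;
	}
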
@@ -113,6 +118,15 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+ unsigned int len, u32 command)
+{
+ append_cmd(desc, command);
+ append_ptr(desc, ptr);
+ append_cmd(desc, len);
+}
+
static inline void append_cmd_data(u32 *desc, void *data, int len,
u32 command)
{
@@ -162,13 +176,22 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
}
APPEND_CMD_PTR(key, KEY)
-APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
-APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(store, STORE)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int len, u32 options) \
@@ -179,6 +202,33 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store length internally or externally depending on
+ * the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+ type len, u32 options) \
+{ \
+ PRINT_POS; \
+ if (sizeof(type) > sizeof(u16)) \
+ append_##cmd##_extlen(desc, ptr, len, options); \
+ else \
+ append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
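
[Reviewer note] The extended-length sequence-pointer variants emit the length as a separate 32-bit word after the pointer instead of packing it into the command word, which is why a u32 length type always selects them in APPEND_CMD_PTR_LEN below. A hedged sketch of typical use (init_job_desc is assumed from elsewhere in this header; the buffers are hypothetical and must already be DMA-mapped):

	/* Sketch: attach input/output sequence pointers to a job descriptor */
	static void example_seq_ptrs(u32 *desc, dma_addr_t src_dma, u32 src_len,
				     dma_addr_t dst_dma, u32 dst_len)
	{
		init_job_desc(desc, 0);
		/*
		 * len is u32, so sizeof(u32) > sizeof(u16) holds and the
		 * generated inline takes the extlen path: the command word
		 * carries SQIN_EXT and the length follows the pointer.
		 */
		append_seq_in_ptr(desc, src_dma, src_len, 0);
		append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	}
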
/*
* 2nd variant for commands whose specified immediate length differs
* from length of immediate data provided, e.g., split keys
@@ -203,3 +253,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * Append math command. Only the last part of destination and source need to
+ * be specified
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+	   MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK))
+
+#define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+ APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+ APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+ APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM. Data is passed in as u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
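
[Reviewer note] These wrappers make descriptor-time arithmetic read like ordinary statements. A hedged sketch of a common pattern, trimming a header from the variable sequence-in length; the register suffixes (VARSEQINLEN, SEQINLEN, IMM) are the usual desc.h MATH_DEST_/MATH_SRC0_/MATH_SRC1_ names and are assumptions here, not verified against this exact header:

	/* Sketch: VARSEQINLEN = SEQINLEN - hdr_bytes, hdr_bytes as immediate */
	static void example_math(u32 *desc, u32 hdr_bytes)
	{
		append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM,
					hdr_bytes);
	}
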
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e2d54bffad6..1c5bd0334578 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -1,7 +1,7 @@
/*
* CAAM Error Reporting
*
- * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2009-2012 Freescale Semiconductor, Inc.
*/
#include "compat.h"
@@ -104,7 +104,8 @@ static void report_deco_status(u32 status, char *outstr)
{ 0x00, "None. No error." },
{ 0x01, "SGT Length Error. The descriptor is trying to read "
"more data than is contained in the SGT table." },
- { 0x02, "Reserved." },
+ { 0x02, "SGT Null Entry Error. Extension bit was set, but "
+ "SGT entry was null." },
{ 0x03, "Job Ring Control Error. There is a bad value in the "
"Job Ring Control register." },
{ 0x04, "Invalid Descriptor Command. The Descriptor Command "
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 02c7baa1748e..177cbc40fa10 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -1,7 +1,7 @@
/*
* CAAM Error Reporting code header
*
- * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ * Copyright 2012 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_ERROR_H
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index a34be01b0b29..83c5adef00f5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -2,7 +2,7 @@
* CAAM/SEC 4.x driver backend
* Private/internal definitions between modules
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*
*/
@@ -12,6 +12,9 @@
#define JOBR_UNASSIGNED 0
#define JOBR_ASSIGNED 1
+/* Default clock/sample settings for an RNG4 entropy source */
+#define RNG4_ENT_CLOCKS_SAMPLE 1600
+
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -26,6 +29,23 @@
#define JOBR_INTC_COUNT_THLD 0
#endif
+#ifndef CONFIG_OF
+#define JR_IRQRES_NAME_ROOT "irq_jr"
+#define JR_MEMRES_NAME_ROOT "offset_jr"
+#endif
+
+#ifdef CONFIG_ARM
+/*
+ * FIXME: ARM tree doesn't seem to provide this, ergo it seems to be
+ * in "platform limbo". Find a better place, perhaps.
+ */
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+ return;
+}
+#endif
+
/*
* Storage for tracking each in-process entry moving across a ring
* Each entry on an output ring needs one of these
@@ -75,6 +95,9 @@ struct caam_drv_private {
struct caam_deco **deco; /* DECO/CCB views */
struct caam_assurance *ac;
struct caam_queue_if *qi; /* QI control region */
+ struct snvs_full __iomem *snvs; /* SNVS HP+LP register space */
+ dma_addr_t __iomem *sm_base; /* Secure memory storage base */
+ u32 sm_size;
/*
* Detected geometry block. Filled in from device tree if powerpc,
@@ -83,6 +106,7 @@ struct caam_drv_private {
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
+ int rng_inst; /* Total instantiated RNGs */
/* which jr allocated to scatterlist crypto */
atomic_t tfm_count ____cacheline_aligned;
@@ -90,7 +114,20 @@ struct caam_drv_private {
struct device **algapi_jr;
/* list of registered crypto algorithms (mk generic context handle?) */
struct list_head alg_list;
+ /* list of registered hash algorithms (mk generic context handle?) */
+ struct list_head hash_list;
+
+#ifdef CONFIG_ARM
+ struct clk *caam_clk;
+#endif
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM
+ struct device *smdev; /* Secure Memory dev */
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO
+ struct device *secviodev;
+#endif
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
@@ -108,6 +145,43 @@ struct caam_drv_private {
#endif
};
-void caam_jr_algapi_init(struct device *dev);
-void caam_jr_algapi_remove(struct device *dev);
+/*
+ * These startup/shutdown functions exist to enable API startup/shutdown
+ * outside of the OF device detection framework. It's necessary for ARM
+ * kernels as presently delivered.
+ *
+ * Once ARM kernels are shipping with OF support, these functions can
+ * be re-integrated into the normal probe startup/exit functions,
+ * and these prototypes can then be removed.
+ */
+#ifndef CONFIG_OF
+void caam_algapi_shutdown(struct platform_device *pdev);
+int caam_algapi_startup(struct platform_device *pdev);
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+int caam_algapi_hash_startup(struct platform_device *pdev);
+void caam_algapi_hash_shutdown(struct platform_device *pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+int caam_rng_startup(struct platform_device *pdev);
+void caam_rng_shutdown(void);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM
+int caam_sm_startup(struct platform_device *pdev);
+void caam_sm_shutdown(struct platform_device *pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST
+int caam_sm_example_init(struct platform_device *pdev);
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO
+int caam_secvio_startup(struct platform_device *pdev);
+void caam_secvio_shutdown(struct platform_device *pdev);
+#endif /* SECVIO */
+
+#endif /* CONFIG_OF */
+
#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 340fa322c0f0..40bca549e236 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -2,7 +2,7 @@
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "compat.h"
@@ -57,9 +57,18 @@ static void caam_jr_dequeue(unsigned long devarg)
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
+ dma_addr_t outbusaddr;
void *userarg;
unsigned long flags;
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ /* CAAM: Fix incorrect invalidate call for output ring */
+ /*
+ dma_sync_single_for_cpu(dev, outbusaddr,
+ sizeof(struct jr_outentry) * JOBR_DEPTH,
+ DMA_FROM_DEVICE);
+ */
+ /* -----END---- */
spin_lock_irqsave(&jrp->outlock, flags);
head = ACCESS_ONCE(jrp->head);
@@ -69,11 +78,15 @@ static void caam_jr_dequeue(unsigned long devarg)
rd_reg32(&jrp->rregs->outring_used)) {
hw_idx = jrp->out_ring_read_index;
+ /* CAAM: Fix incorrect invalidate call for output ring */
+ dma_sync_single_for_cpu(dev, outbusaddr,
+ sizeof(struct jr_outentry) * JOBR_DEPTH,
+ DMA_FROM_DEVICE);
+ /* -----END---- */
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
smp_read_barrier_depends();
-
if (jrp->outring[hw_idx].desc ==
jrp->entinfo[sw_idx].desc_addr_dma)
break; /* found */
@@ -240,7 +253,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
struct caam_jrentry_info *head_entry;
unsigned long flags;
int head, tail, desc_size;
- dma_addr_t desc_dma;
+ dma_addr_t desc_dma, inpbusaddr;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
@@ -249,6 +262,12 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
return -EIO;
}
+ dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
spin_lock_irqsave(&jrp->inplock, flags);
head = jrp->head;
@@ -270,6 +289,10 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
+
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
@@ -343,7 +366,7 @@ static int caam_jr_init(struct device *dev)
(unsigned long)dev);
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- "caam-jobr", dev);
+ "caam-jr", dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -377,7 +400,7 @@ static int caam_jr_init(struct device *dev)
/* Setup rings */
inpbusaddr = dma_map_single(dev, jrp->inpring,
sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, inpbusaddr)) {
dev_err(dev, "caam_jr_init(): can't map input ring\n");
kfree(jrp->inpring);
@@ -388,12 +411,12 @@ static int caam_jr_init(struct device *dev)
outbusaddr = dma_map_single(dev, jrp->outring,
sizeof(struct jr_outentry) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, outbusaddr)) {
dev_err(dev, "caam_jr_init(): can't map output ring\n");
dma_unmap_single(dev, inpbusaddr,
sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
kfree(jrp->inpring);
kfree(jrp->outring);
kfree(jrp->entinfo);
@@ -446,11 +469,9 @@ int caam_jr_shutdown(struct device *dev)
outbusaddr = rd_reg64(&jrp->rregs->outring_base);
dma_unmap_single(dev, outbusaddr,
sizeof(struct jr_outentry) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- kfree(jrp->outring);
- kfree(jrp->inpring);
+ DMA_TO_DEVICE);
kfree(jrp->entinfo);
return ret;
@@ -467,8 +488,12 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
struct platform_device *jr_pdev;
struct caam_drv_private *ctrlpriv;
struct caam_drv_private_jr *jrpriv;
- u32 *jroffset;
int error;
+ /* FIXME: perhaps "struct resource *" for OF and non? */
+ u32 *jroffset, *irqres;
+#ifndef CONFIG_OF
+ char *rname, rinst;
+#endif
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
@@ -489,12 +514,35 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
* need to add in the offset to this JobR. Don't know if I
* like this long-term, but it'll run
*/
+#ifdef CONFIG_OF
jroffset = (u32 *)of_get_property(np, "reg", NULL);
+#else
+	rname = kmalloc(strlen(JR_MEMRES_NAME_ROOT) + 2, GFP_KERNEL);
+ if (rname == NULL) {
+ dev_err(ctrldev, "can't alloc resource detection buffer %d\n",
+ ring);
+ kfree(jrpriv);
+ return -ENOMEM;
+ }
+ rname[0] = 0;
+ rinst = '0' + ring;
+ strcat(rname, JR_MEMRES_NAME_ROOT);
+ strncat(rname, &rinst, 1);
+ jroffset = (u32 *)platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ rname);
+ kfree(rname);
+#endif
jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
+ *jroffset);
/* Build a local dev for each detected queue */
+#ifdef CONFIG_OF
jr_pdev = of_platform_device_create(np, NULL, ctrldev);
+#else
+ jr_pdev = platform_device_register_data(ctrldev, "caam_jr", ring,
+ jrpriv,
+ sizeof(struct caam_drv_private_jr));
+#endif
if (jr_pdev == NULL) {
kfree(jrpriv);
return -EINVAL;
@@ -504,7 +552,24 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
ctrlpriv->jrdev[ring] = jrdev;
/* Identify the interrupt */
+#ifdef CONFIG_OF
jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+#else
+	rname = kmalloc(strlen(JR_IRQRES_NAME_ROOT) + 2, GFP_KERNEL);
+ if (rname == NULL) {
+ dev_err(ctrldev, "can't alloc resource detection buffer %d\n",
+ ring);
+ kfree(jrpriv);
+ return -ENOMEM;
+ }
+ rname[0] = 0;
+ strcat(rname, JR_IRQRES_NAME_ROOT);
+ strncat(rname, &rinst, 1);
+	irqres = (u32 *)platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						     rname);
+	kfree(rname);
+	if (irqres == NULL) {
+		dev_err(ctrldev, "can't find IRQ resource %d\n", ring);
+		kfree(jrpriv);
+		return -ENODEV;
+	}
+	jrpriv->irq = *irqres;
+#endif
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 000000000000..8e67628b3c1b
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,124 @@
+/*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+#include "compat.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "key_gen.h"
+
+void split_key_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct split_key_result *res = context;
+
+#ifdef DEBUG
+ dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ res->err = err;
+
+ complete(&res->completion);
+}
+EXPORT_SYMBOL(split_key_done);
+
+/*
+ * Get a split ipad/opad key
+ *
+ * Split key generation example descriptor:
+ *
+ * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
+ * [01] 0x04000014        key: class2->keyreg len=20
+ *                        @0xffe01000
+ * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
+ * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
+ * [05] 0xa4000001       jump: class2 local all ->1 [06]
+ * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
+ *                        @0xffe04000
+ */
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+		  int split_key_pad_len, const u8 *key_in, u32 keylen,
+		  u32 alg_op)
+{
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ int ret = 0;
+
+	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc)
+		return -ENOMEM;
+
+	init_job_desc(desc, 0);
+
+ dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_in)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ dma_sync_single_for_device(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+ append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+
+ /*
+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+ * into both pads inside MDHA
+ */
+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /*
+ * FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_out)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+		kfree(desc);
+		return -ENOMEM;
+	}
+ append_fifo_store(desc, dma_addr_out, split_key_len,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ split_key_pad_len, 1);
+#endif
+ }
+ dma_sync_single_for_cpu(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+
+ kfree(desc);
+
+ return ret;
+}
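
[Reviewer note] A hedged usage sketch for gen_split_key(): deriving a split HMAC-SHA1 key the way the algapi code does. The 2x-digest split length, the 16-byte padding, and the OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC operation encoding follow the conventions in caamalg.c and are illustrative, not prescriptive:

	/* Sketch: generate a padded SHA1 ipad/opad split key */
	static int example_split_key(struct device *jrdev,
				     const u8 *raw_key, u32 raw_len)
	{
		int split_key_len = 2 * SHA1_DIGEST_SIZE;	/* ipad + opad */
		int split_key_pad_len = ALIGN(split_key_len, 16);
		u8 *key_out;
		int ret;

		key_out = kmalloc(split_key_pad_len, GFP_KERNEL | GFP_DMA);
		if (!key_out)
			return -ENOMEM;

		ret = gen_split_key(jrdev, key_out, split_key_len,
				    split_key_pad_len, raw_key, raw_len,
				    OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC);

		kfree(key_out);
		return ret;
	}
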
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 000000000000..2306b3260614
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,17 @@
+/*
+ * CAAM/SEC 4.x definitions for handling key-generation jobs
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct split_key_result {
+ struct completion completion;
+ int err;
+};
+
+void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+		  int split_key_pad_len, const u8 *key_in, u32 keylen,
+		  u32 alg_op);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index aee394e39056..248dd932fe44 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -1,7 +1,7 @@
/*
* CAAM hardware register-level view
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
*/
#ifndef REGS_H
@@ -74,15 +74,21 @@
#endif
#else
#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(reg, data)
-#define rd_reg32(reg) __raw_readl(reg)
+#define wr_reg32(reg, data) writel(data, reg)
+#define rd_reg32(reg) readl(reg)
#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(reg, data)
-#define rd_reg64(reg) __raw_readq(reg)
+#define wr_reg64(reg, data) writeq(data, reg)
+#define rd_reg64(reg) readq(reg)
#endif
#endif
#endif
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARMs */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+#endif
+
#ifndef CONFIG_64BIT
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
@@ -107,16 +113,103 @@ struct jr_outentry {
} __packed;
/*
+ * CHA version ID / instantiation bitfields
+ * Defined for use within cha_id in perfmon
+ * Note that the same shift/mask selectors can be used to pull out number
+ * of instantiated blocks within cha_num in perfmon, the locations are
+ * the same.
+ */
+
+/* Job Ring */
+#define CHA_ID_JR_SHIFT 60
+#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
+
+/* DEscriptor COntroller */
+#define CHA_ID_DECO_SHIFT 56
+#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_NUM_DECONUM_SHIFT 56 /* legacy definition */
+#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+
+/* ZUC-Authentication */
+#define CHA_ID_ZA_SHIFT 44
+#define CHA_ID_ZA_MASK (0xfull << CHA_ID_ZA_SHIFT)
+
+/* ZUC-Encryption */
+#define CHA_ID_ZE_SHIFT 40
+#define CHA_ID_ZE_MASK (0xfull << CHA_ID_ZE_SHIFT)
+
+/* SNOW f9 */
+#define CHA_ID_SNW9_SHIFT 36
+#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
+
+/* CRC */
+#define CHA_ID_CRC_SHIFT 32
+#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
+
+/* Public Key */
+#define CHA_ID_PK_SHIFT 28
+#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
+
+/* Kasumi */
+#define CHA_ID_KAS_SHIFT 24
+#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
+
+/* SNOW f8 */
+#define CHA_ID_SNW8_SHIFT 20
+#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
+
+/*
+ * Random Generator
+ * RNG4 = FIPS-verification-compliant, requires init kickstart for use
+ */
+#define CHA_ID_RNG_SHIFT 16
+#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_A (0x1ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_B (0x2ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_C (0x3ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_4 (0x4ull << CHA_ID_RNG_SHIFT)
+
+/*
+ * Message Digest
+ * LP256 = Low Power (MD5/SHA1/SHA224/SHA256 + HMAC)
+ * LP512 = Low Power (LP256 + SHA384/SHA512)
+ * HP = High Power (LP512 + SMAC)
+ */
+#define CHA_ID_MD_SHIFT 12
+#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_LP256 (0x0ull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_LP512 (0x1ull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_HP (0x2ull << CHA_ID_MD_SHIFT)
+
+/* ARC4 Streamcipher */
+#define CHA_ID_ARC4_SHIFT 8
+#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_ARC4_LP (0x0ull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_ARC4_HP (0x1ull << CHA_ID_ARC4_SHIFT)
+
+/* DES Blockcipher Accelerator */
+#define CHA_ID_DES_SHIFT 4
+#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
+
+/*
+ * AES Blockcipher + Combo Mode Accelerator
+ * LP = Low Power (includes ECB/CBC/CFB128/OFB/CTR/CCM/CMAC/XCBC-MAC)
+ * HP = High Power (LP + CBCXCBC/CTRXCBC/XTS/GCM)
+ * DIFFPWR = ORed in if differential-power-analysis resistance implemented
+ */
+#define CHA_ID_AES_SHIFT 0
+#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_LP (0x3ull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_HP (0x4ull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_DIFFPWR (0x1ull << CHA_ID_AES_SHIFT)
+
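
[Reviewer note] These selectors let the controller discover which accelerator versions a given SoC instantiated; the RNG field in particular drives the RNG4 kickstart decision noted above. A hedged probe-time sketch (assumes the cha_id field of caam_perfmon, as in the full header, and rd_reg64 from earlier in this file):

	/* Sketch: detect an RNG4 so its entropy source can be kickstarted */
	static bool example_has_rng4(struct caam_perfmon __iomem *perfmon)
	{
		u64 cha_id = rd_reg64(&perfmon->cha_id);

		return (cha_id & CHA_ID_RNG_MASK) == CHA_ID_RNG_4;
	}
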
+/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
*
* Spans f00-fff wherever instantiated
*/
-/* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT 56
-#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
-
struct caam_perfmon {
/* Performance Monitor Registers f00-f9f */
u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
@@ -133,15 +226,21 @@ struct caam_perfmon {
#define CTPR_QI_SHIFT 57
#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
u64 comp_parms; /* CTPR - Compile Parameters Register */
- u64 rsvd1[2];
+
+ /* Secure Memory State Visibility */
+ u32 rsvd1;
+ u32 smstatus; /* Secure memory status */
+ u32 rsvd2;
+ u32 smpartown; /* Secure memory partition owner */
/* CAAM Global Status fc0-fdf */
u64 faultaddr; /* FAR - Fault Address */
u32 faultliodn; /* FALR - Fault Address LIODN */
u32 faultdetail; /* FADR - Fault Addr Detail */
- u32 rsvd2;
+ u32 rsvd3;
u32 status; /* CSTA - CAAM Status */
- u64 rsvd3;
+ u32 smpart; /* Secure Memory Partition Parameters */
+ u32 smvid; /* Secure Memory Version ID */
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
@@ -151,6 +250,62 @@ struct caam_perfmon {
u64 caam_id; /* CAAMVID - CAAM Version ID */
};
+#define SMSTATUS_PART_SHIFT 28
+#define SMSTATUS_PART_MASK (0xf << SMSTATUS_PART_SHIFT)
+#define SMSTATUS_PAGE_SHIFT 16
+#define SMSTATUS_PAGE_MASK (0x7ff << SMSTATUS_PAGE_SHIFT)
+#define SMSTATUS_MID_SHIFT 8
+#define SMSTATUS_MID_MASK (0x3f << SMSTATUS_MID_SHIFT)
+#define SMSTATUS_ACCERR_SHIFT 4
+#define SMSTATUS_ACCERR_MASK (0xf << SMSTATUS_ACCERR_SHIFT)
+#define SMSTATUS_ACCERR_NONE 0
+#define SMSTATUS_ACCERR_ALLOC 1 /* Page not allocated */
+#define SMSTATUS_ACCESS_ID 2 /* Not granted by ID */
+#define SMSTATUS_ACCESS_WRITE 3 /* Writes not allowed */
+#define SMSTATUS_ACCESS_READ 4 /* Reads not allowed */
+#define SMSTATUS_ACCESS_NONKEY 6 /* Non-key reads not allowed */
+#define SMSTATUS_ACCESS_BLOB 9 /* Blob access not allowed */
+#define SMSTATUS_ACCESS_DESCB 10 /* Descriptor Blob access spans pages */
+#define SMSTATUS_ACCESS_NON_SM 11 /* Outside Secure Memory range */
+#define SMSTATUS_ACCESS_XPAGE 12 /* Access crosses pages */
+#define SMSTATUS_ACCESS_INITPG 13 /* Page still initializing */
+#define SMSTATUS_STATE_SHIFT 0
+#define SMSTATUS_STATE_MASK (0xf << SMSTATUS_STATE_SHIFT)
+#define SMSTATUS_STATE_RESET 0
+#define SMSTATUS_STATE_INIT 1
+#define SMSTATUS_STATE_NORMAL 2
+#define SMSTATUS_STATE_FAIL 3
+
+/* up to 15 rings, 2 bits shifted by ring number */
+#define SMPARTOWN_RING_SHIFT 2
+#define SMPARTOWN_RING_MASK 3
+#define SMPARTOWN_AVAILABLE 0
+#define SMPARTOWN_NOEXIST 1
+#define SMPARTOWN_UNAVAILABLE 2
+#define SMPARTOWN_OURS 3
+
+/* Maximum number of pages possible */
+#define SMPART_MAX_NUMPG_SHIFT 16
+#define SMPART_MAX_NUMPG_MASK (0x3f << SMPART_MAX_NUMPG_SHIFT)
+
+/* Maximum partition number */
+#define SMPART_MAX_PNUM_SHIFT 12
+#define SMPART_MAX_PNUM_MASK (0xf << SMPART_MAX_PNUM_SHIFT)
+
+/* Highest possible page number */
+#define SMPART_MAX_PG_SHIFT 0
+#define SMPART_MAX_PG_MASK (0x3f << SMPART_MAX_PG_SHIFT)
+
+/* Max size of a page */
+#define SMVID_PG_SIZE_SHIFT 16
+#define SMVID_PG_SIZE_MASK (0x7 << SMVID_PG_SIZE_SHIFT)
+
+/* Major/Minor Version ID */
+#define SMVID_MAJ_VERS_SHIFT 8
+#define SMVID_MAJ_VERS (0xf << SMVID_MAJ_VERS_SHIFT)
+#define SMVID_MIN_VERS_SHIFT 0
+#define SMVID_MIN_VERS (0xf << SMVID_MIN_VERS_SHIFT)
+
/* LIODN programming for DMA configuration */
#define MSTRID_LOCK_LIODN 0x80000000
#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
@@ -167,7 +322,7 @@ struct partid {
u32 pidr; /* partition ID, DECO */
};
-/* RNG test mode (replicated twice in some configurations) */
+/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
u32 mode; /* RTSTMODEx - Test mode */
@@ -200,6 +355,32 @@ struct rngtst {
u32 rsvd14[15];
};
+/* RNG4 TRNG test registers */
+struct rng4tst {
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_OSC_DIV_MASK 0xc /* select oscillator divider value */
+ u32 rtmctl; /* misc. control register */
+ u32 rtscmisc; /* statistical check misc. register */
+ u32 rtpkrrng; /* poker range register */
+ union {
+ u32 rtpkrmax; /* PRGM=1: poker max. limit register */
+ u32 rtpkrsq; /* PRGM=0: poker square calc. result register */
+ };
+#define RTSDCTL_ENT_DLY_SHIFT 16
+#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+ u32 rtsdctl; /* seed control register */
+ union {
+ u32 rtsblim; /* PRGM=1: sparse bit limit register */
+ u32 rttotsam; /* PRGM=0: total samples register */
+ };
+ u32 rtfrqmin; /* frequency count min. limit register */
+ union {
+ u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
+ u32 rtfrqcnt; /* PRGM=0: freq. count register */
+ };
+ u32 rsvd1[56];
+};
+
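
[Reviewer note] The unions above mirror the RTMCTL_PRGM bit: the same offsets expose limit registers in program mode and result registers in run mode. A hedged sketch of the usual RNG4 setup sequence, using the setbits32/clrbits32 helpers defined earlier in this header and an entropy delay such as the RNG4_ENT_CLOCKS_SAMPLE default from intern.h:

	/* Sketch: program the TRNG entropy delay, then return to run mode */
	static void example_rng4_setup(struct rng4tst __iomem *r4tst, u32 ent_delay)
	{
		u32 val;

		/* PRGM=1: expose the programmable register views */
		setbits32(&r4tst->rtmctl, RTMCTL_PRGM);

		val = rd_reg32(&r4tst->rtsdctl);
		val = (val & ~RTSDCTL_ENT_DLY_MASK) |
		      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
		wr_reg32(&r4tst->rtsdctl, val);

		/* PRGM=0: back to run mode so entropy generation starts */
		clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
	}
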
/*
* caam_ctrl - basic core configuration
* starts base + 0x0000 padded out to 0x1000
@@ -249,7 +430,10 @@ struct caam_ctrl {
/* RNG Test/Verification/Debug Access 600-7ff */
/* (Useful in Test/Debug modes only...) */
- struct rngtst rtst[2];
+ union {
+ struct rngtst rtst[2];
+ struct rng4tst r4tst[2];
+ };
u32 rsvd9[448];
@@ -323,7 +507,18 @@ struct caam_job_ring {
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
- u32 rsvd12[932];
+ u32 rsvd12[33];
+
+ /* Secure Memory Configuration - if you have it */
+ u32 sm_cmd; /* SMCJRx - Secure memory command */
+ u32 rsvd13;
+ u32 sm_status; /* SMCSJRx - Secure memory status */
+ u32 rsvd14;
+ u32 sm_perm; /* SMAPJRx - Secure memory access perms */
+ u32 sm_group2; /* SMAP2JRx - Secure memory access group 2 */
+ u32 sm_group1; /* SMAP1JRx - Secure memory access group 1 */
+
+ u32 rsvd15[891];
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
@@ -446,6 +641,62 @@ struct caam_job_ring {
#define JRCR_RESET 0x01
+/* secure memory command */
+#define SMC_PAGE_SHIFT 16
+#define SMC_PAGE_MASK (0xffff << SMC_PAGE_SHIFT)
+#define SMC_PART_SHIFT 8
+#define SMC_PART_MASK (0x0f << SMC_PART_SHIFT)
+#define SMC_CMD_SHIFT 0
+#define SMC_CMD_MASK (0x0f << SMC_CMD_SHIFT)
+
+#define SMC_CMD_ALLOC_PAGE 0x01 /* allocate page to this partition */
+#define SMC_CMD_DEALLOC_PAGE 0x02 /* deallocate page from partition */
+#define SMC_CMD_DEALLOC_PART 0x03 /* deallocate partition */
+#define SMC_CMD_PAGE_INQUIRY	0x05 /* find partition associated with page */
+
+/* secure memory (command) status */
+#define SMCS_PAGE_SHIFT 16
+#define SMCS_PAGE_MASK (0x0fff << SMCS_PAGE_SHIFT)
+#define SMCS_CMDERR_SHIFT 14
+#define SMCS_CMDERR_MASK (3 << SMCS_CMDERR_SHIFT)
+#define SMCS_ALCERR_SHIFT 12
+#define SMCS_ALCERR_MASK (3 << SMCS_ALCERR_SHIFT)
+#define SMCS_PGOWN_SHIFT 6
+#define SMCS_PGOWN_MASK	(3 << SMCS_PGOWN_SHIFT)
+#define SMCS_PART_SHIFT 0
+#define SMCS_PART_MASK (0xf << SMCS_PART_SHIFT)
+
+#define SMCS_CMDERR_NONE 0
+#define SMCS_CMDERR_INCOMP 1 /* Command not yet complete */
+#define SMCS_CMDERR_SECFAIL 2 /* Security failure occurred */
+#define SMCS_CMDERR_OVERFLOW 3 /* Command overflow */
+
+#define SMCS_ALCERR_NONE 0
+#define SMCS_ALCERR_PSPERR	1	/* Partition marked PSP (dealloc only) */
+#define SMCS_ALCERR_PAGEAVAIL 2 /* Page not available */
+#define SMCS_ALCERR_PARTOWN 3 /* Partition ownership error */
+
+#define SMCS_PGOWN_AVAIL 0 /* Page is available */
+#define SMCS_PGOWN_NOEXIST 1 /* Page initializing or nonexistent */
+#define SMCS_PGOWN_NOOWN 2 /* Page owned by another processor */
+#define SMCS_PGOWN_OWNED 3 /* Page belongs to this processor */
+
+/* secure memory access permissions */
+#define SMCA_PERM_KEYMOD_SHIFT	16
+#define SMCA_PERM_KEYMOD_MASK	(0xff << SMCA_PERM_KEYMOD_SHIFT)
+#define SMCA_PERM_CSP_ZERO 0x8000 /* Zero when deallocated or released */
+#define SMCA_PERM_PSP_LOCK 0x4000 /* Part./pages can't be deallocated */
+#define SMCA_PERM_PERM_LOCK 0x2000 /* Lock permissions */
+#define SMCA_PERM_GRP_LOCK 0x1000 /* Lock access groups */
+#define SMCA_PERM_RINGID_SHIFT 10
+#define SMCA_PERM_RINGID_MASK (3 << SMCA_PERM_RINGID_SHIFT)
+#define SMCA_PERM_G2_BLOB 0x0080 /* Group 2 blob import/export */
+#define SMCA_PERM_G2_WRITE 0x0020 /* Group 2 write */
+#define SMCA_PERM_G2_READ 0x0010 /* Group 2 read */
+#define SMCA_PERM_G1_BLOB 0x0008 /* Group 1... */
+#define SMCA_PERM_G1_WRITE 0x0002
+#define SMCA_PERM_G1_READ 0x0001
+
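[Reviewer note] Together, the command, status, and permission fields form a small request/poll protocol on the job ring's secure memory registers. A hedged sketch of allocating a page to a partition (field names from struct caam_job_ring above; the unbounded busy-wait is illustrative only, real code would add a timeout):

	/* Sketch: allocate a secure memory page to a partition and poll */
	static int example_sm_alloc_page(struct caam_job_ring __iomem *jr,
					 u32 page, u32 partition)
	{
		u32 status;

		wr_reg32(&jr->sm_cmd, (page << SMC_PAGE_SHIFT) |
				      (partition << SMC_PART_SHIFT) |
				      SMC_CMD_ALLOC_PAGE);

		/* wait for the command error field to leave "incomplete" */
		do {
			status = rd_reg32(&jr->sm_status);
		} while ((status & SMCS_CMDERR_MASK) ==
			 (SMCS_CMDERR_INCOMP << SMCS_CMDERR_SHIFT));

		if (status & SMCS_ALCERR_MASK)
			return -EIO;

		return 0;
	}
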
/*
* caam_assurance - Assurance Controller View
* base + 0x6000 padded out to 0x1000
@@ -657,7 +908,6 @@ struct caam_full {
u64 rsvd[512];
struct caam_assurance assure;
struct caam_queue_if qi;
- struct caam_deco *deco;
};
#endif /* REGS_H */
diff --git a/drivers/crypto/caam/secvio.c b/drivers/crypto/caam/secvio.c
new file mode 100644
index 000000000000..b0719e4173b4
--- /dev/null
+++ b/drivers/crypto/caam/secvio.c
@@ -0,0 +1,310 @@
+
+/*
+ * CAAM/SEC 4.x Security Violation Handler
+ * Copyright 2012 Freescale Semiconductor, Inc., All Rights Reserved
+ */
+
+#include "compat.h"
+#include "intern.h"
+#include "secvio.h"
+#include "regs.h"
+
+/*
+ * These names are associated with each violation handler.
+ * The source names were taken from MX6, and are based on recommendations
+ * for most common SoCs.
+ */
+static const u8 *violation_src_name[] = {
+ "CAAM Security Violation",
+ "JTAG Alarm",
+ "Watchdog",
+ "(reserved)",
+ "External Boot",
+ "Tamper Detect",
+};
+
+/* Top-level security violation interrupt */
+static irqreturn_t caam_secvio_interrupt(int irq, void *snvsdev)
+{
+ struct device *dev = snvsdev;
+ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
+ u32 irqstate;
+
+ /* Check the HP secvio status register */
+	irqstate = rd_reg32(&svpriv->svregs->hp.secvio_status) &
+		   HP_SECVIOST_SECVIOMASK;
+ if (!irqstate)
+ return IRQ_NONE;
+
+ /* Mask out one or more causes for deferred service */
+ clrbits32(&svpriv->svregs->hp.secvio_int_ctl, irqstate);
+
+ /* Now ACK causes */
+ setbits32(&svpriv->svregs->hp.secvio_status, irqstate);
+
+ /* And run deferred service */
+ preempt_disable();
+ tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
+ preempt_enable();
+
+ return IRQ_HANDLED;
+}
+
+/* Deferred service handler. Tasklet arg is simply the SNVS dev */
+static void caam_secvio_dispatch(unsigned long indev)
+{
+ struct device *dev = (struct device *)indev;
+ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
+ unsigned long flags, cause;
+ int i;
+
+ /*
+ * Capture the interrupt cause, using masked interrupts as
+ * identification. This only works if all are enabled; if
+ * this changes in the future, a "cause queue" will have to
+ * be built
+ */
+ cause = rd_reg32(&svpriv->svregs->hp.secvio_int_ctl) &
+ (HP_SECVIO_INTEN_SRC5 | HP_SECVIO_INTEN_SRC4 |
+ HP_SECVIO_INTEN_SRC3 | HP_SECVIO_INTEN_SRC2 |
+ HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);
+
+ /* Look through causes, call each handler if exists */
+ for (i = 0; i < MAX_SECVIO_SOURCES; i++)
+ if (cause & (1 << i)) {
+ spin_lock_irqsave(&svpriv->svlock, flags);
+ svpriv->intsrc[i].handler(dev, i,
+ svpriv->intsrc[i].ext);
+ spin_unlock_irqrestore(&svpriv->svlock, flags);
+		}
+
+ /* Re-enable now-serviced interrupts */
+ setbits32(&svpriv->svregs->hp.secvio_int_ctl, cause);
+}
+
+/*
+ * Default cause handler, used in lieu of an application-defined handler.
+ * All it does at this time is print a console message. It could force a halt.
+ */
+static void caam_secvio_default(struct device *dev, u32 cause, void *ext)
+{
+ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
+
+ dev_err(dev, "Unhandled Security Violation Interrupt %d = %s\n",
+ cause, svpriv->intsrc[cause].intname);
+}
+
+/*
+ * Install an application-defined handler for a specified cause
+ * Arguments:
+ * - dev points to SNVS-owning device
+ * - cause interrupt source cause
+ * - handler application-defined handler, gets called with dev
+ * source cause, and locally-defined handler argument
+ * - cause_description points to a string to override the default cause
+ * name, this can be used as an alternate for error
+ * messages and such. If left NULL, the default
+ * description string is used.
+ * - ext pointer to any extra data needed by the handler.
+ */
+int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
+ void (*handler)(struct device *dev, u32 cause,
+ void *ext),
+ u8 *cause_description, void *ext)
+{
+ unsigned long flags;
+ struct caam_drv_private_secvio *svpriv;
+
+ svpriv = dev_get_drvdata(dev);
+
+ if ((handler == NULL) || (cause > SECVIO_CAUSE_SOURCE_5))
+ return -EINVAL;
+
+ spin_lock_irqsave(&svpriv->svlock, flags);
+ svpriv->intsrc[cause].handler = handler;
+ if (cause_description != NULL)
+ svpriv->intsrc[cause].intname = cause_description;
+ if (ext != NULL)
+ svpriv->intsrc[cause].ext = ext;
+ spin_unlock_irqrestore(&svpriv->svlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(caam_secvio_install_handler);
+
+/*
+ * Remove an application-defined handler for a specified cause (and, by
+ * implication, restore the "default").
+ * Arguments:
+ * - dev points to SNVS-owning device
+ * - cause interrupt source cause
+ */
+int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause)
+{
+ unsigned long flags;
+ struct caam_drv_private_secvio *svpriv;
+
+ svpriv = dev_get_drvdata(dev);
+
+ if (cause > SECVIO_CAUSE_SOURCE_5)
+ return -EINVAL;
+
+ spin_lock_irqsave(&svpriv->svlock, flags);
+ svpriv->intsrc[cause].intname = violation_src_name[cause];
+ svpriv->intsrc[cause].handler = caam_secvio_default;
+ svpriv->intsrc[cause].ext = NULL;
+ spin_unlock_irqrestore(&svpriv->svlock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(caam_secvio_remove_handler);
+
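[Reviewer note] A hedged usage sketch for the handler pair above: a driver that can reach the SNVS-owning device hooks the tamper-detect cause (SECVIO_CAUSE_TAMPER_DETECT, declared in secvio.h) and relies on remove to restore the default console handler. The handler body is illustrative:

	/* Sketch: application-defined tamper handler; ext carries private data */
	static void example_tamper_handler(struct device *dev, u32 cause, void *ext)
	{
		dev_crit(dev, "tamper detected, scrubbing secrets\n");
		/* ... zeroize keys, raise an alarm, possibly halt ... */
	}

	static int example_arm_tamper(struct device *snvsdev)
	{
		return caam_secvio_install_handler(snvsdev,
						   SECVIO_CAUSE_TAMPER_DETECT,
						   example_tamper_handler,
						   NULL, NULL);
	}
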
+int caam_secvio_startup(struct platform_device *pdev)
+{
+ struct device *ctrldev, *svdev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_drv_private_secvio *svpriv;
+ struct platform_device *svpdev;
+ int i, error;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+
+ /*
+ * Set up the private block for secure memory
+ * Only one instance is possible
+ */
+ svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL);
+ if (svpriv == NULL) {
+ dev_err(ctrldev, "can't alloc private mem for secvio\n");
+ return -ENOMEM;
+ }
+ svpriv->parentdev = ctrldev;
+
+ /* Create the security violation dev */
+#ifdef CONFIG_OF
+	svpdev = of_platform_device_create(ctrldev->of_node, NULL, ctrldev);
+#else
+ svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0,
+ svpriv,
+ sizeof(struct caam_drv_private_secvio));
+#endif
+ if (svpdev == NULL) {
+ kfree(svpriv);
+ return -EINVAL;
+ }
+ svdev = &svpdev->dev;
+ dev_set_drvdata(svdev, svpriv);
+ ctrlpriv->secviodev = svdev;
+ svpriv->svregs = ctrlpriv->snvs;
+
+ /*
+ * Now we have all the dev data set up. Init interrupt
+ * source descriptions
+ */
+ for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
+ svpriv->intsrc[i].intname = violation_src_name[i];
+ svpriv->intsrc[i].handler = caam_secvio_default;
+ }
+
+ /* Connect main handler */
+ for_each_possible_cpu(i)
+ tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch,
+ (unsigned long)svdev);
+
+ error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt,
+ IRQF_SHARED, "caam-secvio", svdev);
+ if (error) {
+ dev_err(svdev, "can't connect secvio interrupt\n");
+ irq_dispose_mapping(ctrlpriv->secvio_irq);
+ ctrlpriv->secvio_irq = 0;
+ return -EINVAL;
+ }
+
+ /* Enable all sources */
+ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, HP_SECVIO_INTEN_ALL);
+
+ dev_info(svdev, "security violation service handlers armed\n");
+
+ return 0;
+}
+
+void caam_secvio_shutdown(struct platform_device *pdev)
+{
+ struct device *ctrldev, *svdev;
+ struct caam_drv_private *priv;
+ struct caam_drv_private_secvio *svpriv;
+ int i;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ svdev = priv->secviodev;
+ svpriv = dev_get_drvdata(svdev);
+
+ /* Shut off all sources */
+ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, 0);
+
+ /* Remove tasklets and release interrupt */
+ for_each_possible_cpu(i)
+ tasklet_kill(&svpriv->irqtask[i]);
+
+ free_irq(priv->secvio_irq, svdev);
+
+ kfree(svpriv);
+}
+
+#ifdef CONFIG_OF
+static void __exit caam_secvio_exit(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	of_node_put(dev_node);
+	if (!pdev)
+		return;
+
+	caam_secvio_shutdown(pdev);
+}
+
+static int __init caam_secvio_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ /*
+ * Do of_find_compatible_node() then of_find_device_by_node()
+ * once a functional device tree is available
+ */
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+	pdev = of_find_device_by_node(dev_node);
+	of_node_put(dev_node);
+	if (!pdev)
+		return -ENODEV;
+
+ return caam_secvio_startup(pdev);
+}
+
+module_init(caam_secvio_init);
+module_exit(caam_secvio_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM/SNVS Security Violation Handler");
+MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
+#endif
diff --git a/drivers/crypto/caam/secvio.h b/drivers/crypto/caam/secvio.h
new file mode 100644
index 000000000000..2f7d5f6270d8
--- /dev/null
+++ b/drivers/crypto/caam/secvio.h
@@ -0,0 +1,64 @@
+
+/*
+ * CAAM Security Violation Handler
+ * Copyright (c) 2012 Freescale Semiconductor, Inc., All Rights Reserved
+ */
+
+#ifndef SECVIO_H
+#define SECVIO_H
+
+#include "snvsregs.h"
+
+
+/*
+ * Defines the published interfaces to install/remove application-specified
+ * handlers for catching violations
+ */
+
+#define MAX_SECVIO_SOURCES 6
+
+/* these are the untranslated causes */
+enum secvio_cause {
+ SECVIO_CAUSE_SOURCE_0,
+ SECVIO_CAUSE_SOURCE_1,
+ SECVIO_CAUSE_SOURCE_2,
+ SECVIO_CAUSE_SOURCE_3,
+ SECVIO_CAUSE_SOURCE_4,
+ SECVIO_CAUSE_SOURCE_5
+};
+
+/* These are common "recommended" cause definitions for most devices */
+#define SECVIO_CAUSE_CAAM_VIOLATION SECVIO_CAUSE_SOURCE_0
+#define SECVIO_CAUSE_JTAG_ALARM		SECVIO_CAUSE_SOURCE_1
+#define SECVIO_CAUSE_WATCHDOG SECVIO_CAUSE_SOURCE_2
+#define SECVIO_CAUSE_EXTERNAL_BOOT SECVIO_CAUSE_SOURCE_4
+#define SECVIO_CAUSE_TAMPER_DETECT SECVIO_CAUSE_SOURCE_5
+
+int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
+ void (*handler)(struct device *dev, u32 cause,
+ void *ext),
+ u8 *cause_description, void *ext);
+int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause);
+
+/*
+ * Private data definitions for the secvio "driver"
+ */
+
+struct secvio_int_src {
+ const u8 *intname; /* Points to a descriptive name for source */
+ void *ext; /* Extended data to pass to the handler */
+ void (*handler)(struct device *dev, u32 cause, void *ext);
+};
+
+struct caam_drv_private_secvio {
+ struct device *parentdev; /* points back to the controller */
+ spinlock_t svlock ____cacheline_aligned;
+ struct tasklet_struct irqtask[NR_CPUS];
+ struct snvs_full __iomem *svregs; /* both HP and LP domains */
+
+ /* Registered handlers for each violation */
+ struct secvio_int_src intsrc[MAX_SECVIO_SOURCES];
+
+};
+
+#endif /* SECVIO_H */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 000000000000..e05fc58c9637
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,165 @@
+/*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct sec4_sg_entry;
+
+/*
+ * convert single dma address to h/w link table format
+ */
+static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+#ifndef CONFIG_64BIT
+ sec4_sg_ptr->reserved = 0; /* ensure MSB half is zeroed */
+#endif
+ sec4_sg_ptr->ptr = dma;
+ sec4_sg_ptr->len |= (len & SEC4_SG_LEN_MASK);
+ /* Does not add in buffer pool ID's at this time */
+ sec4_sg_ptr->bpid_offset = (offset & SEC4_SG_OFFS_MASK);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+ sizeof(struct sec4_sg_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not have final bit; instead, returns last entry
+ */
+static inline struct sec4_sg_entry *
+sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
+{
+ while (sg_count) {
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ sec4_sg_ptr++;
+ sg = scatterwalk_sg_next(sg);
+ sg_count--;
+ }
+ return sec4_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr,
+ u32 offset)
+{
+ sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
+}
+
+/* count number of elements in scatterlist */
+static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes > 0) {
+ sg_nents++;
+ nbytes -= sg->length;
+ if (!sg_is_last(sg) && (sg + 1)->length == 0)
+ *chained = true;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+/* count scatterlist entries; return 0 for a single entry (no table needed) */
+static inline int sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ int sg_nents = __sg_count(sg_list, nbytes, chained);
+
+ if (likely(sg_nents == 1))
+ return 0;
+
+ return sg_nents;
+}
+
+static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_map_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ } else {
+ dma_map_sg(dev, sg, nents, dir);
+ }
+
+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_device(dev, sg, nents, dir);
+
+ return nents;
+}
+
+static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ } else {
+ dma_unmap_sg(dev, sg, nents, dir);
+ }
+ return nents;
+}
+
+/* Copy the first len bytes of sg into dest */
+static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
+{
+ struct scatterlist *current_sg = sg;
+ int cpy_index = 0, next_cpy_index = current_sg->length;
+
+ while (next_cpy_index < len) {
+ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+ current_sg->length);
+ current_sg = scatterwalk_sg_next(current_sg);
+ cpy_index = next_cpy_index;
+ next_cpy_index += current_sg->length;
+ }
+ if (cpy_index < len)
+ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+ len - cpy_index);
+}
+
+/* Copy sg data, from to_skip to end, to dest */
+static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
+ int to_skip, unsigned int end)
+{
+ struct scatterlist *current_sg = sg;
+ int sg_index, cpy_index;
+
+ sg_index = current_sg->length;
+ while (sg_index <= to_skip) {
+ current_sg = scatterwalk_sg_next(current_sg);
+ sg_index += current_sg->length;
+ }
+ cpy_index = sg_index - to_skip;
+ memcpy(dest, (u8 *) sg_virt(current_sg) +
+ current_sg->length - cpy_index, cpy_index);
+ current_sg = scatterwalk_sg_next(current_sg);
+ if (end - sg_index)
+ sg_copy(dest + cpy_index, current_sg, end - sg_index);
+}
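
[Reviewer note] The skip/end arithmetic in sg_copy_part() is easy to misread: with three 16-byte segments, sg_copy_part(dest, sg, 24, 40) copies aggregate bytes 24..39, i.e. the tail of the middle segment plus the head of the last one. A hedged self-test sketch (kernel context assumed; the buffers are plain virtual memory, no DMA mapping involved):

	/* Sketch: exercise sg_copy_part() over three 16-byte chunks */
	static void example_sg_copy_part(void)
	{
		u8 src[48], dest[16];
		struct scatterlist sg[3];
		int i;

		for (i = 0; i < 48; i++)
			src[i] = i;

		sg_init_table(sg, 3);
		sg_set_buf(&sg[0], src, 16);
		sg_set_buf(&sg[1], src + 16, 16);
		sg_set_buf(&sg[2], src + 32, 16);

		/* dest receives aggregate bytes 24..39 (values 24..39) */
		sg_copy_part(dest, sg, 24, 40);
	}
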
diff --git a/drivers/crypto/caam/sm.h b/drivers/crypto/caam/sm.h
new file mode 100644
index 000000000000..fba4c5679add
--- /dev/null
+++ b/drivers/crypto/caam/sm.h
@@ -0,0 +1,86 @@
+
+/*
+ * CAAM Secure Memory/Keywrap API Definitions
+ * Copyright (c) 2008, 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef SM_H
+#define SM_H
+
+
+/* Storage access permissions */
+#define SM_PERM_READ 0x01
+#define SM_PERM_WRITE 0x02
+#define SM_PERM_BLOB 0x03
+
+
+/* Keystore maintenance functions */
+void sm_init_keystore(struct device *dev);
+u32 sm_detect_keystore_units(struct device *dev);
+int sm_establish_keystore(struct device *dev, u32 unit);
+void sm_release_keystore(struct device *dev, u32 unit);
+
+/* Keystore accessor functions */
+extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
+ u32 *slot);
+extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
+extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
+ const u8 *key_data, u32 key_length);
+extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
+ u32 key_length, u8 *key_data);
+extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
+ u32 inslot, u32 outslot, u16 secretlen,
+ u8 *keymod, u16 keymodlen);
+extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
+ u32 inslot, u32 outslot, u16 secretlen,
+ u8 *keymod, u16 keymodlen);
+
+/* Data structure to hold per-slot information */
+struct keystore_data_slot_info {
+ u8 allocated; /* Track slot assignments */
+ u32 key_length; /* Size of the key */
+};
+
+/* Data structure to hold keystore information */
+struct keystore_data {
+ void *base_address; /* Base of the Secure Partition */
+ u32 slot_count; /* Number of slots in the keystore */
+ struct keystore_data_slot_info *slot; /* Per-slot information */
+};
+
+/* store the detected attributes of a secure memory page */
+struct sm_page_descriptor {
+ u16 phys_pagenum; /* may be discontiguous */
+ u16 own_part; /* Owning partition */
+ void *pg_base; /* Calculated virtual address */
+ struct keystore_data *ksdata;
+};
+
+struct caam_drv_private_sm {
+ struct device *parentdev; /* this ends up as the controller */
+ struct device *smringdev; /* ring that owns this instance */
+ spinlock_t kslock ____cacheline_aligned;
+
+ /* Default parameters for geometry */
+ u32 max_pages; /* maximum pages this instance can support */
+ u32 top_partition; /* highest partition number in this instance */
+ u32 top_page; /* highest page number in this instance */
+ u32 page_size; /* page size */
+ u32 slot_size; /* selected size of each storage block */
+
+ /* Partition/Page Allocation Map */
+ u32 localpages; /* Number of pages we can access */
+ struct sm_page_descriptor *pagedesc; /* Allocated per-page */
+
+ /* Installed handlers for keystore access */
+ int (*data_init)(struct device *dev, u32 unit);
+ void (*data_cleanup)(struct device *dev, u32 unit);
+ int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
+ int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
+ void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
+ u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
+ u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
+ u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
+};
+
+#endif /* SM_H */
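
[Reviewer note] A hedged end-to-end sketch of the keystore API declared above: detect units, claim one, load a raw key into a slot, and encapsulate it into a second slot as a blob. The 48-byte blob overhead (BKEK + MAC) and 8-byte key modifier follow the comments in sm_store.c; error handling is abbreviated and the sizes are illustrative:

	/* Sketch: store a 32-byte key and wrap it into a blob */
	static int example_keystore(struct device *ksdev, const u8 *key)
	{
		u8 keymod[8] = "exmpl-km";	/* illustrative key modifier */
		u32 unit = 0, inslot, outslot;
		int ret;

		if (!sm_detect_keystore_units(ksdev))
			return -ENODEV;

		ret = sm_establish_keystore(ksdev, unit);
		if (ret)
			return ret;

		ret = sm_keystore_slot_alloc(ksdev, unit, 32, &inslot);
		if (ret)
			goto release;
		/* blob output needs the secret size plus 48 bytes overhead */
		ret = sm_keystore_slot_alloc(ksdev, unit, 32 + 48, &outslot);
		if (ret)
			goto dealloc_in;

		ret = sm_keystore_slot_load(ksdev, unit, inslot, key, 32);
		if (!ret)
			ret = sm_keystore_slot_encapsulate(ksdev, unit, inslot,
							   outslot, 32,
							   keymod, 8);

		sm_keystore_slot_dealloc(ksdev, unit, outslot);
	dealloc_in:
		sm_keystore_slot_dealloc(ksdev, unit, inslot);
	release:
		sm_release_keystore(ksdev, unit);
		return ret;
	}
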
diff --git a/drivers/crypto/caam/sm_store.c b/drivers/crypto/caam/sm_store.c
new file mode 100644
index 000000000000..b2c508a6da75
--- /dev/null
+++ b/drivers/crypto/caam/sm_store.c
@@ -0,0 +1,890 @@
+
+/*
+ * CAAM Secure Memory Storage Interface
+ * Copyright (c) 2008, 2012 Freescale Semiconductor, Inc.
+ *
+ * Loosely based on the SHW Keystore API for SCC/SCC2
+ * Experimental implementation and NOT intended for upstream use. Expect
+ * this interface to be amended significantly in the future once it becomes
+ * integrated into live applications.
+ *
+ * Known issues:
+ *
+ * - Executes one instance of a secure memory "driver". This is tied to the
+ * fact that job rings can't run as standalone instances in the present
+ * configuration.
+ *
+ * - It does not expose a userspace interface. The value of a userspace
+ * interface for access to secrets is a point for further architectural
+ * discussion.
+ *
+ * - Partition/permission management is not part of this interface. It
+ * depends on some level of "knowledge" agreed upon between bootloader,
+ * provisioning applications, and OS-hosted software (which uses this
+ * driver).
+ *
+ * - No means of identifying the location or purpose of secrets managed by
+ * this interface exists; "slot location" and format of a given secret
+ * needs to be agreed upon between bootloader, provisioner, and OS-hosted
+ * application.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+#include "error.h"
+#include "sm.h"
+
+#ifdef SM_DEBUG_CONT
+void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ u32 i, *smdata;
+
+ dev_info(dev, "physical page %d content at 0x%08x\n",
+		 pgdesc->phys_pagenum, (u32)pgdesc->pg_base);
+ smdata = pgdesc->pg_base;
+ for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
+ dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
+ smdata[i+3]);
+}
+#endif
+
+/*
+ * Construct a secure memory blob encapsulation job descriptor
+ *
+ * - desc pointer to hold new (to be allocated) pointer to the generated
+ * descriptor for later use. Calling thread can kfree the
+ * descriptor after execution.
+ * - keymod Physical pointer to key modifier (contiguous piece).
+ * - keymodsz Size of key modifier in bytes (should normally be 8).
+ * - secretbuf Physical pointer (within an accessible secure memory page)
+ * of the secret to be encapsulated.
+ * - outbuf Physical pointer (within an accessible secure memory page)
+ * of the encapsulated output. This will be larger than the
+ * input secret because of the added encapsulation data.
+ * - secretsz Size of input secret, in bytes.
+ * - auth If nonzero, use AES-CCM for encapsulation, else use ECB
+ *
+ * Note: this uses 32-bit pointers at present
+ */
+#define INITIAL_DESCSZ 16 /* size of tmp buffer for descriptor construction */
+static int blob_encap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
+ dma_addr_t secretbuf, dma_addr_t outbuf,
+ u16 secretsz, bool auth)
+{
+ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
+ u16 dsize, idx;
+
+ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
+ idx = 1;
+
+ /* Load key modifier */
+ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
+ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
+ (keymodsz & LDST_LEN_MASK);
+
+ tmpdesc[idx++] = (u32)keymod;
+
+ /* Encapsulate to secure memory */
+ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
+ tmpdesc[idx++] = (u32)secretbuf;
+
+ /* Add space for BKEK and MAC tag */
+	tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + (32 + 16));
+
+ tmpdesc[idx++] = (u32)outbuf;
+ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
+ OP_PCL_BLOB_PTXT_SECMEM;
+ if (auth)
+ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
+
+ idx++;
+ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
+ dsize = idx * sizeof(u32);
+
+ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
+ if (tdesc == NULL)
+ return 0;
+
+ memcpy(tdesc, tmpdesc, dsize);
+ *desc = tdesc;
+ return dsize;
+}
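+
+/*
+ * For reference, the encapsulation descriptor built above occupies eight
+ * 32-bit words (a sketch of the typical layout; the exact bit values come
+ * from desc.h):
+ *
+ *	[0] HEADER (length = 8)
+ *	[1] LOAD, class 2 key register, offset 12, length = keymodsz
+ *	[2] keymod (physical address)
+ *	[3] SEQ IN PTR, length = secretsz
+ *	[4] secretbuf (physical address)
+ *	[5] SEQ OUT PTR, length = secretsz + 48
+ *	[6] outbuf (physical address)
+ *	[7] OPERATION, blob encapsulation, secure memory plaintext
+ */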
+
+/*
+ * Construct a secure memory blob decapsulation job descriptor
+ *
+ * - desc pointer to hold new (to be allocated) pointer to the generated
+ * descriptor for later use. Calling thread can kfree the
+ * descriptor after execution.
+ * - keymod Physical pointer to key modifier (contiguous piece).
+ * - keymodsz	Size of key modifier in bytes (should normally be 8).
+ * - blobbuf Physical pointer (within an accessible secure memory page)
+ * of the blob to be decapsulated.
+ * - outbuf Physical pointer (within an accessible secure memory page)
+ * of the decapsulated output.
+ * - secretsz Size of input blob, in bytes.
+ * - auth If nonzero, assume AES-CCM for decapsulation, else use ECB
+ *
+ * Note: this uses 32-bit pointers at present
+ */
+static int blob_decap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
+ dma_addr_t blobbuf, dma_addr_t outbuf,
+ u16 blobsz, bool auth)
+{
+ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
+ u16 dsize, idx;
+
+ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
+ idx = 1;
+
+ /* Load key modifier */
+ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
+ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
+ (keymodsz & LDST_LEN_MASK);
+
+ tmpdesc[idx++] = (u32)keymod;
+
+ /* Compensate BKEK + MAC tag */
+ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (blobsz + 32 + 16);
+
+ tmpdesc[idx++] = (u32)blobbuf;
+ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | blobsz;
+ tmpdesc[idx++] = (u32)outbuf;
+
+ /* Decapsulate from secure memory partition to black blob */
+ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB |
+ OP_PCL_BLOB_PTXT_SECMEM | OP_PCL_BLOB_BLACK;
+ if (auth)
+ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
+
+ idx++;
+ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
+ dsize = idx * sizeof(u32);
+
+ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
+ if (tdesc == NULL)
+ return 0;
+
+ memcpy(tdesc, tmpdesc, dsize);
+ *desc = tdesc;
+ return dsize;
+}
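+
+/*
+ * Blob size arithmetic used by both constructors above (for reference):
+ * an encapsulated blob carries a 32-byte blob-key section plus a 16-byte
+ * MAC tag on top of the secret, so a 32-byte secret yields a
+ * 32 + 32 + 16 = 80 byte blob, and decapsulating that 80-byte blob
+ * recovers the 32-byte secret.
+ */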
+
+/*
+ * Pseudo-synchronous ring access functions for carrying out key
+ * encapsulation and decapsulation
+ */
+
+struct sm_key_job_result {
+ int error;
+ struct completion completion;
+};
+
+void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct sm_key_job_result *res = context;
+
+ res->error = err; /* save off the error for postprocessing */
+ complete(&res->completion); /* mark us complete */
+}
+
+static int sm_key_job(struct device *ksdev, u32 *jobdesc)
+{
+ struct sm_key_job_result testres;
+ struct caam_drv_private_sm *kspriv;
+ int rtn = 0;
+
+ kspriv = dev_get_drvdata(ksdev);
+
+ init_completion(&testres.completion);
+
+ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
+ &testres);
+ if (!rtn) {
+ wait_for_completion_interruptible(&testres.completion);
+ rtn = testres.error;
+ }
+ return rtn;
+}
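+
+/*
+ * Typical use of the pseudo-synchronous helper (illustrative only; the
+ * DMA addresses and the 8-byte key modifier are assumptions):
+ *
+ *	u32 *jobdesc;
+ *	int err = -ENOMEM;
+ *
+ *	if (blob_encap_desc(&jobdesc, keymod_dma, 8, in_dma, out_dma,
+ *			    secretsz, 0)) {
+ *		err = sm_key_job(ksdev, jobdesc);
+ *		kfree(jobdesc);
+ *	}
+ */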
+
+/*
+ * The following section establishes the default methods for keystore
+ * access. They are NOT intended for use external to this module.
+ *
+ * In the present version, these are the only means for the higher-level
+ * interface to deal with the mechanics of accessing the physical keystore.
+ */
+
+int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
+ u32 i;
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
+#endif
+
+ if (size > smpriv->slot_size)
+ return -EKEYREJECTED;
+
+ for (i = 0; i < ksdata->slot_count; i++) {
+ if (ksdata->slot[i].allocated == 0) {
+ ksdata->slot[i].allocated = 1;
+ (*slot) = i;
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_alloc(): new slot %d allocated\n",
+ *slot);
+#endif
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+int slot_dealloc(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
+ u8 __iomem *slotdata;
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
+#endif
+ if (slot >= ksdata->slot_count)
+ return -EINVAL;
+ slotdata = ksdata->base_address + slot * smpriv->slot_size;
+
+ if (ksdata->slot[slot].allocated == 1) {
+ /* Forcibly overwrite the data from the keystore */
+ memset(ksdata->base_address + slot * smpriv->slot_size, 0,
+ smpriv->slot_size);
+
+ ksdata->slot[slot].allocated = 0;
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
+#endif
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void *slot_get_address(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
+
+ if (slot >= ksdata->slot_count)
+ return NULL;
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
+ (u32)ksdata->base_address + slot * smpriv->slot_size);
+#endif
+
+ return ksdata->base_address + slot * smpriv->slot_size;
+}
+
+u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
+
+ /*
+ * There could potentially be more than one secure partition object
+ * associated with this keystore. For now, there is just one.
+ */
+
+ (void)slot;
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
+ slot, (u32)ksdata->base_address);
+#endif
+
+ return (u32)(ksdata->base_address);
+}
+
+u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
+
+ if (slot >= ksdata->slot_count)
+ return -EINVAL;
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
+ slot * smpriv->slot_size);
+#endif
+
+ return slot * smpriv->slot_size;
+}
+
+u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+#ifdef SM_DEBUG
+ dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
+ smpriv->slot_size);
+#endif
+ /* All slots are the same size in the default implementation */
+ return smpriv->slot_size;
+}
+
+int kso_init_data(struct device *dev, u32 unit)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = -EINVAL;
+ struct keystore_data *keystore_data = NULL;
+ u32 slot_count;
+ u32 keystore_data_size;
+
+ /*
+ * Calculate the required size of the keystore data structure, based
+ * on the number of keys that can fit in the partition.
+ */
+ slot_count = smpriv->page_size / smpriv->slot_size;
+#ifdef SM_DEBUG
+ dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
+#endif
+
+ keystore_data_size = sizeof(struct keystore_data) +
+ slot_count *
+ sizeof(struct keystore_data_slot_info);
+
+ keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
+
+ if (keystore_data == NULL) {
+ retval = -ENOSPC;
+ goto out;
+ }
+
+#ifdef SM_DEBUG
+ dev_info(dev, "kso_init_data: keystore data size = %d\n",
+ keystore_data_size);
+#endif
+
+ /*
+ * Place the slot information structure directly after the keystore data
+ * structure.
+ */
+ keystore_data->slot = (struct keystore_data_slot_info *)
+ (keystore_data + 1);
+ keystore_data->slot_count = slot_count;
+
+ smpriv->pagedesc[unit].ksdata = keystore_data;
+ smpriv->pagedesc[unit].ksdata->base_address =
+ smpriv->pagedesc[unit].pg_base;
+
+ retval = 0;
+
+out:
+	if (retval != 0)
+		kfree(keystore_data);
+
+ return retval;
+}
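+
+/*
+ * Layout note (worked example, sizes are assumptions): with a 4096-byte
+ * page and 512-byte slots, slot_count above is 4096 / 512 = 8, and the
+ * single allocation holds the keystore_data header immediately followed
+ * by eight keystore_data_slot_info entries.
+ */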
+
+void kso_cleanup_data(struct device *dev, u32 unit)
+{
+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+	/* Release the allocated keystore management data */
+	kfree(smpriv->pagedesc[unit].ksdata);
+	smpriv->pagedesc[unit].ksdata = NULL;
+}
+
+/*
+ * Keystore management section
+ */
+
+void sm_init_keystore(struct device *dev)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+ smpriv->data_init = kso_init_data;
+ smpriv->data_cleanup = kso_cleanup_data;
+ smpriv->slot_alloc = slot_alloc;
+ smpriv->slot_dealloc = slot_dealloc;
+ smpriv->slot_get_address = slot_get_address;
+ smpriv->slot_get_base = slot_get_base;
+ smpriv->slot_get_offset = slot_get_offset;
+ smpriv->slot_get_slot_size = slot_get_slot_size;
+#ifdef SM_DEBUG
+ dev_info(dev, "sm_init_keystore(): handlers installed\n");
+#endif
+}
+EXPORT_SYMBOL(sm_init_keystore);
+
+/* Return available pages/units */
+u32 sm_detect_keystore_units(struct device *dev)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+ return smpriv->localpages;
+}
+EXPORT_SYMBOL(sm_detect_keystore_units);
+
+/*
+ * Do any keystore specific initializations
+ */
+int sm_establish_keystore(struct device *dev, u32 unit)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+#ifdef SM_DEBUG
+ dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
+#endif
+
+ if (smpriv->data_init == NULL)
+ return -EINVAL;
+
+ /* Call the data_init function for any user setup */
+ return smpriv->data_init(dev, unit);
+}
+EXPORT_SYMBOL(sm_establish_keystore);
+
+void sm_release_keystore(struct device *dev, u32 unit)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+
+#ifdef SM_DEBUG
+	dev_info(dev, "sm_release_keystore(): unit %d releasing\n", unit);
+#endif
+ if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
+ smpriv->data_cleanup(dev, unit);
+
+ return;
+}
+EXPORT_SYMBOL(sm_release_keystore);
+
+/*
+ * The subsequent interface (sm_keystore_*) forms the accessor interface to
+ * the keystore
+ */
+int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = -EINVAL;
+
+ spin_lock(&smpriv->kslock);
+
+ if ((smpriv->slot_alloc == NULL) ||
+ (smpriv->pagedesc[unit].ksdata == NULL))
+ goto out;
+
+ retval = smpriv->slot_alloc(dev, unit, size, slot);
+
+out:
+ spin_unlock(&smpriv->kslock);
+ return retval;
+}
+EXPORT_SYMBOL(sm_keystore_slot_alloc);
+
+int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = -EINVAL;
+
+ spin_lock(&smpriv->kslock);
+
+	if ((smpriv->slot_dealloc == NULL) ||
+	    (smpriv->pagedesc[unit].ksdata == NULL))
+ goto out;
+
+ retval = smpriv->slot_dealloc(dev, unit, slot);
+out:
+ spin_unlock(&smpriv->kslock);
+ return retval;
+}
+EXPORT_SYMBOL(sm_keystore_slot_dealloc);
+
+int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
+ const u8 *key_data, u32 key_length)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = -EINVAL;
+ u32 slot_size;
+ u32 i;
+ u8 __iomem *slot_location;
+
+ spin_lock(&smpriv->kslock);
+
+ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
+
+ if (key_length > slot_size) {
+ retval = -EFBIG;
+ goto out;
+ }
+
+ slot_location = smpriv->slot_get_address(dev, unit, slot);
+
+ for (i = 0; i < key_length; i++)
+ slot_location[i] = key_data[i];
+
+ retval = 0;
+
+out:
+ spin_unlock(&smpriv->kslock);
+ return retval;
+}
+EXPORT_SYMBOL(sm_keystore_slot_load);
+
+int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
+ u32 key_length, u8 *key_data)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = -EINVAL;
+ u8 __iomem *slot_addr;
+ u32 slot_size;
+
+ spin_lock(&smpriv->kslock);
+
+ slot_addr = smpriv->slot_get_address(dev, unit, slot);
+ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
+
+ if (key_length > slot_size) {
+ retval = -EKEYREJECTED;
+ goto out;
+ }
+
+ memcpy(key_data, slot_addr, key_length);
+ retval = 0;
+
+out:
+ spin_unlock(&smpriv->kslock);
+ return retval;
+}
+EXPORT_SYMBOL(sm_keystore_slot_read);
+
+int sm_keystore_slot_encapsulate(struct device *dev, u32 unit, u32 inslot,
+ u32 outslot, u16 secretlen, u8 *keymod,
+ u16 keymodlen)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = 0;
+ u32 slot_length, dsize, jstat;
+ u32 __iomem *encapdesc = NULL;
+ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
+ dma_addr_t keymod_dma;
+
+ /* Ensure that the full blob will fit in the key slot */
+ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
+	if ((secretlen + 48) > slot_length) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+ /* Get the base addresses of both keystore slots */
+ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
+ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
+
+ /* Build the key modifier */
+	lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
+	if (lkeymod == NULL) {
+		retval = -ENOMEM;
+		goto out;
+	}
+	memcpy(lkeymod, keymod, keymodlen);
+ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+
+ /* Build the encapsulation job descriptor */
+ dsize = blob_encap_desc(&encapdesc, keymod_dma, keymodlen,
+ __pa(inpslotaddr), __pa(outslotaddr),
+ secretlen, 0);
+	if (!dsize) {
+		dev_err(dev, "can't alloc an encap descriptor\n");
+		retval = -ENOMEM;
+		dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+		kfree(lkeymod);
+		goto out;
+	}
+	jstat = sm_key_job(dev, encapdesc);
+
+	dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+	kfree(lkeymod);
+	kfree(encapdesc);
+
+out:
+ return retval;
+
+}
+EXPORT_SYMBOL(sm_keystore_slot_encapsulate);
+
+int sm_keystore_slot_decapsulate(struct device *dev, u32 unit, u32 inslot,
+ u32 outslot, u16 secretlen, u8 *keymod,
+ u16 keymodlen)
+{
+ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
+ int retval = 0;
+ u32 slot_length, dsize, jstat;
+ u32 __iomem *decapdesc = NULL;
+ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
+ dma_addr_t keymod_dma;
+
+ /* Ensure that the decap data will fit in the key slot */
+ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
+	if (secretlen > slot_length) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+ /* Get the base addresses of both keystore slots */
+ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
+ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
+
+ /* Build the key modifier */
+	lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
+	if (lkeymod == NULL) {
+		retval = -ENOMEM;
+		goto out;
+	}
+	memcpy(lkeymod, keymod, keymodlen);
+ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+
+ /* Build the decapsulation job descriptor */
+ dsize = blob_decap_desc(&decapdesc, keymod_dma, keymodlen,
+ __pa(inpslotaddr), __pa(outslotaddr),
+ secretlen, 0);
+	if (!dsize) {
+		dev_err(dev, "can't alloc a decap descriptor\n");
+		retval = -ENOMEM;
+		dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+		kfree(lkeymod);
+		goto out;
+	}
+	jstat = sm_key_job(dev, decapdesc);
+
+	dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
+	kfree(lkeymod);
+	kfree(decapdesc);
+
+out:
+ return retval;
+
+}
+EXPORT_SYMBOL(sm_keystore_slot_decapsulate);
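+
+/*
+ * Round-trip sketch (illustrative, mirroring what sm_test.c does): wrap
+ * a 32-byte secret in place, then recover it as a black key. "skeymod"
+ * is an 8-byte key modifier agreed with the provisioner.
+ *
+ *	sm_keystore_slot_encapsulate(ksdev, unit, slot, slot, 32,
+ *				     skeymod, 8);
+ *	sm_keystore_slot_decapsulate(ksdev, unit, slot, slot, 32,
+ *				     skeymod, 8);
+ */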
+
+/*
+ * Initialization/shutdown subsystem
+ * Assumes statically-invoked startup/shutdown from the controller driver
+ * for the present time, to be reworked when a device tree becomes
+ * available. This code will not modularize in present form.
+ *
+ * Also, simply uses ring 0 for execution at the present
+ */
+
+int caam_sm_startup(struct platform_device *pdev)
+{
+ struct device *ctrldev, *smdev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_drv_private_sm *smpriv;
+ struct caam_drv_private_jr *jrpriv; /* need this for reg page */
+ struct platform_device *sm_pdev;
+ struct sm_page_descriptor *lpagedesc;
+ u32 page, pgstat, lpagect, detectedpage;
+
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+
+ /*
+ * Set up the private block for secure memory
+ * Only one instance is possible
+ */
+ smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
+ if (smpriv == NULL) {
+ dev_err(ctrldev, "can't alloc private mem for secure memory\n");
+ return -ENOMEM;
+ }
+ smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
+
+ /* Create the dev */
+#ifdef CONFIG_OF
+	/* np is assumed to name the SM node once a device tree exists */
+	sm_pdev = of_platform_device_create(np, NULL, ctrldev);
+#else
+ sm_pdev = platform_device_register_data(ctrldev, "caam_sm", 0,
+ smpriv,
+ sizeof(struct caam_drv_private_sm));
+#endif
+ if (sm_pdev == NULL) {
+ kfree(smpriv);
+ return -EINVAL;
+ }
+ smdev = &sm_pdev->dev;
+ dev_set_drvdata(smdev, smpriv);
+ ctrlpriv->smdev = smdev;
+
+ /*
+ * Collect configuration limit data for reference
+ * This batch comes from the partition data/vid registers in perfmon
+ */
+ smpriv->max_pages = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
+ & SMPART_MAX_NUMPG_MASK) >>
+ SMPART_MAX_NUMPG_SHIFT) + 1;
+ smpriv->top_partition = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
+ & SMPART_MAX_PNUM_MASK) >>
+ SMPART_MAX_PNUM_SHIFT) + 1;
+ smpriv->top_page = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
+ & SMPART_MAX_PG_MASK) >> SMPART_MAX_PG_SHIFT) + 1;
+ smpriv->page_size = 1024 << ((rd_reg32(&ctrlpriv->ctrl->perfmon.smvid)
+ & SMVID_PG_SIZE_MASK) >> SMVID_PG_SIZE_SHIFT);
+ smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
+
+#ifdef SM_DEBUG
+ dev_info(smdev, "max pages = %d, top partition = %d\n",
+ smpriv->max_pages, smpriv->top_partition);
+ dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
+ smpriv->top_page, smpriv->page_size,
+ smpriv->top_page * smpriv->page_size);
+ dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
+#endif
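+
+	/*
+	 * Example decode (the field values are assumptions): an SMVID
+	 * page-size field of 1 gives page_size = 1024 << 1 = 2048 bytes,
+	 * and a slot-size exponent of 7 gives 1 << 7 = 128 byte slots,
+	 * i.e. 2048 / 128 = 16 slots per page.
+	 */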
+
+ /*
+ * Now probe for partitions/pages to which we have access. Note that
+ * these have likely been set up by a bootloader or platform
+ * provisioning application, so we have to assume that we "inherit"
+ * a configuration and work within the constraints of what it might be.
+ *
+ * Assume use of the zeroth ring in the present iteration (until
+ * we can divorce the controller and ring drivers, and then assign
+ * an SM instance to any ring instance).
+ */
+ smpriv->smringdev = ctrlpriv->jrdev[0];
+ jrpriv = dev_get_drvdata(smpriv->smringdev);
+ lpagect = 0;
+ lpagedesc = kzalloc(sizeof(struct sm_page_descriptor)
+ * smpriv->max_pages, GFP_KERNEL);
+ if (lpagedesc == NULL) {
+ kfree(smpriv);
+ return -ENOMEM;
+ }
+
+ for (page = 0; page < smpriv->max_pages; page++) {
+ wr_reg32(&jrpriv->rregs->sm_cmd,
+ ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
+ (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK));
+ pgstat = rd_reg32(&jrpriv->rregs->sm_status);
+ if (((pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT)
+ == SMCS_PGOWN_OWNED) { /* our page? */
+ lpagedesc[page].phys_pagenum =
+ (pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
+ lpagedesc[page].own_part =
+				(pgstat & SMCS_PART_MASK) >> SMCS_PART_SHIFT;
+ lpagedesc[page].pg_base = ctrlpriv->sm_base +
+ ((smpriv->page_size * page) / sizeof(u32));
+ lpagect++;
+#ifdef SM_DEBUG
+ dev_info(smdev,
+ "physical page %d, owning partition = %d\n",
+ lpagedesc[page].phys_pagenum,
+ lpagedesc[page].own_part);
+#endif
+ }
+ }
+
+ smpriv->pagedesc = kmalloc(sizeof(struct sm_page_descriptor) * lpagect,
+ GFP_KERNEL);
+ if (smpriv->pagedesc == NULL) {
+ kfree(lpagedesc);
+ kfree(smpriv);
+ return -ENOMEM;
+ }
+ smpriv->localpages = lpagect;
+
+ detectedpage = 0;
+ for (page = 0; page < smpriv->max_pages; page++) {
+ if (lpagedesc[page].pg_base != NULL) { /* e.g. live entry */
+ memcpy(&smpriv->pagedesc[detectedpage],
+ &lpagedesc[page],
+ sizeof(struct sm_page_descriptor));
+#ifdef SM_DEBUG_CONT
+ sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
+#endif
+ detectedpage++;
+ }
+ }
+
+ kfree(lpagedesc);
+
+ sm_init_keystore(smdev);
+
+ return 0;
+}
+
+void caam_sm_shutdown(struct platform_device *pdev)
+{
+ struct device *ctrldev, *smdev;
+ struct caam_drv_private *priv;
+ struct caam_drv_private_sm *smpriv;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ smdev = priv->smdev;
+ smpriv = dev_get_drvdata(smdev);
+
+ kfree(smpriv->pagedesc);
+ kfree(smpriv);
+}
+
+#ifdef CONFIG_OF
+static void __exit caam_sm_exit(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+			return;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+		return;
+
+ of_node_put(dev_node);
+
+ caam_sm_shutdown(pdev);
+}
+
+static int __init caam_sm_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+
+ /*
+	 * Find the controller node, then resolve it to its platform
+	 * device; this only works once a functional device tree is available
+ */
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ of_node_put(dev_node);
+
+ return caam_sm_startup(pdev);
+}
+
+module_init(caam_sm_init);
+module_exit(caam_sm_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
+MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
+#endif
diff --git a/drivers/crypto/caam/sm_test.c b/drivers/crypto/caam/sm_test.c
new file mode 100644
index 000000000000..1e00b61584fc
--- /dev/null
+++ b/drivers/crypto/caam/sm_test.c
@@ -0,0 +1,796 @@
+
+/*
+ * Secure Memory / Keystore Exemplification Module
+ * Copyright (c) 2012 Freescale Semiconductor. All Rights Reserved
+ *
+ * Serves as a functional example, and as a self-contained unit test for
+ * the functionality contained in sm_store.c.
+ *
+ * The example function, caam_sm_example_init(), runs a test sequence that:
+ *
+ * - initializes a set of fixed keys
+ * - stores one copy in clear buffers
+ * - stores them again in secure memory
+ * - extracts stored keys back out for use
+ * - initializes 3 data buffers for a test:
+ * (1) containing cleartext
+ * (2) to hold ciphertext encrypted with an extracted black key
+ * (3) to hold extracted cleartext decrypted with an equivalent clear key
+ *
+ * The function then builds simple job descriptors that reference the key
+ * material and buffers as initialized, and executes an encryption job
+ * with a black key, and a decryption job using the same key held in the
+ * clear. The output of the decryption job is compared to the original
+ * cleartext; if they don't compare correctly, one can assume a key problem
+ * exists, and the function will exit with an error.
+ *
+ * This module can use a substantial amount of refactoring, which may occur
+ * after the API gets some mileage. Furthermore, expect this module to
+ * eventually disappear once the API is integrated into "real" software.
+ */
+
+#include "compat.h"
+#include "intern.h"
+#include "desc.h"
+#include "error.h"
+#include "jr.h"
+#include "sm.h"
+
+static u8 skeymod[] = {
+ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
+ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
+};
+static u8 symkey[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+};
+
+static u8 symdata[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+static int mk_job_desc(u32 *desc, dma_addr_t key, u16 keysz, dma_addr_t indata,
+ dma_addr_t outdata, u16 sz, u32 cipherdir, u32 keymode)
+{
+ desc[1] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK) | keymode;
+ desc[2] = (u32)key;
+ desc[3] = CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_AAI_ECB |
+ cipherdir;
+ desc[4] = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 | sz;
+ desc[5] = (u32)indata;
+ desc[6] = CMD_FIFO_STORE | FIFOST_TYPE_MESSAGE_DATA | sz;
+ desc[7] = (u32)outdata;
+
+ desc[0] = CMD_DESC_HDR | HDR_ONE | (8 & HDR_DESCLEN_MASK);
+ return 8 * sizeof(u32);
+}
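+
+/*
+ * Example invocation (a sketch; the DMA handles are assumed to be
+ * mapped already, as is done for each cycle below):
+ *
+ *	u32 desc[8];
+ *
+ *	mk_job_desc(desc, key_dma, 16, in_dma, out_dma, 256,
+ *		    OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
+ */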
+
+struct exec_test_result {
+ int error;
+ struct completion completion;
+};
+
+void exec_test_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct exec_test_result *res = context;
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ res->error = err;
+ complete(&res->completion);
+}
+
+static int exec_test_job(struct device *ksdev, u32 *jobdesc)
+{
+ struct exec_test_result testres;
+ struct caam_drv_private_sm *kspriv;
+ int rtn = 0;
+
+ kspriv = dev_get_drvdata(ksdev);
+
+ init_completion(&testres.completion);
+
+ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, exec_test_done,
+ &testres);
+ if (!rtn) {
+ wait_for_completion_interruptible(&testres.completion);
+ rtn = testres.error;
+ }
+ return rtn;
+}
+
+
+int caam_sm_example_init(struct platform_device *pdev)
+{
+ struct device *ctrldev, *ksdev;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_drv_private_sm *kspriv;
+ u32 unit, units, jdescsz;
+ int stat, jstat, rtnval = 0;
+ u8 __iomem *syminp, *symint, *symout = NULL;
+ dma_addr_t syminp_dma, symint_dma, symout_dma;
+ u8 __iomem *black_key_des, *black_key_aes128;
+ u8 __iomem *black_key_aes256;
+ dma_addr_t black_key_des_dma, black_key_aes128_dma;
+ dma_addr_t black_key_aes256_dma;
+ u8 __iomem *clear_key_des, *clear_key_aes128, *clear_key_aes256;
+ dma_addr_t clear_key_des_dma, clear_key_aes128_dma;
+ dma_addr_t clear_key_aes256_dma;
+ u32 __iomem *jdesc;
+ u32 keyslot_des, keyslot_aes128, keyslot_aes256 = 0;
+
+ jdesc = NULL;
+ black_key_des = black_key_aes128 = black_key_aes256 = NULL;
+ clear_key_des = clear_key_aes128 = clear_key_aes256 = NULL;
+
+ /* We can lose this cruft once we can get a pdev by name */
+ ctrldev = &pdev->dev;
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ ksdev = ctrlpriv->smdev;
+ kspriv = dev_get_drvdata(ksdev);
+ if (kspriv == NULL)
+ return -ENODEV;
+
+ /* Now that we have the dev for the single SM instance, connect */
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test_init() running\n");
+#endif
+ /* Probe to see what keystores are available to us */
+ units = sm_detect_keystore_units(ksdev);
+ if (!units)
+ dev_err(ksdev, "caam_sm_test: no keystore units available\n");
+
+ /*
+ * MX6 bootloader stores some stuff in unit 0, so let's
+ * use 1 or above
+ */
+ if (units < 2) {
+ dev_err(ksdev, "caam_sm_test: insufficient keystore units\n");
+ return -ENODEV;
+ }
+ unit = 1;
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: %d keystore units available\n", units);
+#endif
+
+ /* Initialize/Establish Keystore */
+	sm_establish_keystore(ksdev, unit);	/* Initialize store in #1 */
+
+ /*
+ * Top of main test thread
+ */
+
+ /* Allocate test data blocks (input, intermediate, output) */
+ syminp = kmalloc(256, GFP_KERNEL | GFP_DMA);
+ symint = kmalloc(256, GFP_KERNEL | GFP_DMA);
+ symout = kmalloc(256, GFP_KERNEL | GFP_DMA);
+ if ((syminp == NULL) || (symint == NULL) || (symout == NULL)) {
+ rtnval = -ENOMEM;
+ dev_err(ksdev, "caam_sm_test: can't get test data buffers\n");
+ goto freemem;
+ }
+
+ /* Allocate storage for 3 black keys: encapsulated 8, 16, 32 */
+ black_key_des = kmalloc(16, GFP_KERNEL | GFP_DMA); /* padded to 16... */
+ black_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
+ black_key_aes256 = kmalloc(16, GFP_KERNEL | GFP_DMA);
+ if ((black_key_des == NULL) || (black_key_aes128 == NULL) ||
+ (black_key_aes256 == NULL)) {
+ rtnval = -ENOMEM;
+		dev_err(ksdev, "caam_sm_test: can't get black key buffers\n");
+ goto freemem;
+ }
+
+ clear_key_des = kmalloc(8, GFP_KERNEL | GFP_DMA);
+ clear_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
+ clear_key_aes256 = kmalloc(32, GFP_KERNEL | GFP_DMA);
+ if ((clear_key_des == NULL) || (clear_key_aes128 == NULL) ||
+ (clear_key_aes256 == NULL)) {
+ rtnval = -ENOMEM;
+ dev_err(ksdev, "caam_sm_test: can't get clear key buffers\n");
+ goto freemem;
+ }
+
+ /* Allocate storage for job descriptor */
+ jdesc = kmalloc(8 * sizeof(u32), GFP_KERNEL | GFP_DMA);
+ if (jdesc == NULL) {
+ rtnval = -ENOMEM;
+ dev_err(ksdev, "caam_sm_test: can't get descriptor buffers\n");
+ goto freemem;
+ }
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: all buffers allocated\n");
+#endif
+
+ /* Load up input data block, clear outputs */
+ memcpy(syminp, symdata, 256);
+ memset(symint, 0, 256);
+ memset(symout, 0, 256);
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[0], syminp[1], syminp[2], syminp[3],
+ syminp[4], syminp[5], syminp[6], syminp[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[0], symout[1], symout[2], symout[3],
+ symout[4], symout[5], symout[6], symout[7]);
+
+ dev_info(ksdev, "caam_sm_test: data buffers initialized\n");
+#endif
+
+ /* Load up clear keys */
+ memcpy(clear_key_des, symkey, 8);
+ memcpy(clear_key_aes128, symkey, 16);
+ memcpy(clear_key_aes256, symkey, 32);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: all clear keys loaded\n");
+#endif
+
+ /*
+ * Place clear keys in keystore.
+ * All the interesting stuff happens here.
+ */
+	/* 8 byte DES key */
+ stat = sm_keystore_slot_alloc(ksdev, unit, 8, &keyslot_des);
+ if (stat)
+ goto freemem;
+#ifdef SM_TEST_DETAIL
+	dev_info(ksdev, "caam_sm_test: 8 byte key slot in %d\n", keyslot_des);
+#endif
+ stat = sm_keystore_slot_load(ksdev, unit, keyslot_des, clear_key_des,
+ 8);
+ if (stat) {
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: can't load 8 byte key in %d\n",
+ keyslot_des);
+#endif
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ goto freemem;
+ }
+
+	/* 16 byte AES key */
+ stat = sm_keystore_slot_alloc(ksdev, unit, 16, &keyslot_aes128);
+ if (stat) {
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ goto freemem;
+ }
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: 16 byte key slot in %d\n",
+ keyslot_aes128);
+#endif
+ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes128,
+ clear_key_aes128, 16);
+ if (stat) {
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: can't load 16 byte key in %d\n",
+ keyslot_aes128);
+#endif
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ goto freemem;
+ }
+
+	/* 32 byte AES key */
+ stat = sm_keystore_slot_alloc(ksdev, unit, 32, &keyslot_aes256);
+ if (stat) {
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ goto freemem;
+ }
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: 32 byte key slot in %d\n",
+ keyslot_aes256);
+#endif
+ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes256,
+ clear_key_aes256, 32);
+ if (stat) {
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: can't load 32 byte key in %d\n",
+			 keyslot_aes256);
+#endif
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
+ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ goto freemem;
+ }
+
+ /* Encapsulate all keys as SM blobs */
+ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_des,
+ keyslot_des, 8, skeymod, 8);
+ if (stat) {
+ dev_info(ksdev, "caam_sm_test: can't encapsulate DES key\n");
+ goto freekeys;
+ }
+
+ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes128,
+ keyslot_aes128, 16, skeymod, 8);
+ if (stat) {
+ dev_info(ksdev, "caam_sm_test: can't encapsulate AES128 key\n");
+ goto freekeys;
+ }
+
+ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes256,
+ keyslot_aes256, 32, skeymod, 8);
+ if (stat) {
+ dev_info(ksdev, "caam_sm_test: can't encapsulate AES256 key\n");
+ goto freekeys;
+ }
+
+ /* Now decapsulate as black key blobs */
+ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_des,
+ keyslot_des, 8, skeymod, 8);
+ if (stat) {
+ dev_info(ksdev, "caam_sm_test: can't decapsulate DES key\n");
+ goto freekeys;
+ }
+
+ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes128,
+ keyslot_aes128, 16, skeymod, 8);
+ if (stat) {
+ dev_info(ksdev, "caam_sm_test: can't decapsulate AES128 key\n");
+ goto freekeys;
+ }
+
+ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes256,
+ keyslot_aes256, 32, skeymod, 8);
+ if (stat) {
+		dev_info(ksdev, "caam_sm_test: can't decapsulate AES256 key\n");
+ goto freekeys;
+ }
+
+ /* Extract 8/16/32 byte black keys */
+ sm_keystore_slot_read(ksdev, unit, keyslot_des, 8, black_key_des);
+ sm_keystore_slot_read(ksdev, unit, keyslot_aes128, 16,
+ black_key_aes128);
+ sm_keystore_slot_read(ksdev, unit, keyslot_aes256, 32,
+ black_key_aes256);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: all black keys extracted\n");
+#endif
+
+ /* DES encrypt using 8 byte black key */
+ black_key_des_dma = dma_map_single(ksdev, black_key_des, 8,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
+ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, black_key_des_dma, 8, syminp_dma,
+ symint_dma, 256,
+ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_DES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "input block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[0], syminp[1], syminp[2], syminp[3],
+ syminp[4], syminp[5], syminp[6], syminp[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[8], syminp[9], syminp[10], syminp[11],
+ syminp[12], syminp[13], syminp[14], syminp[15]);
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "caam_sm_test: encrypt cycle with 8 byte key\n");
+#endif
+
+ /* DES decrypt using 8 byte clear key */
+ clear_key_des_dma = dma_map_single(ksdev, clear_key_des, 8,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, clear_key_des_dma, 8, symint_dma,
+ symout_dma, 256,
+ OP_ALG_DECRYPT | OP_ALG_ALGSEL_DES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "decrypted block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[0], symout[1], symout[2], symout[3],
+ symout[4], symout[5], symout[6], symout[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[8], symout[9], symout[10], symout[11],
+ symout[12], symout[13], symout[14], symout[15]);
+ dev_info(ksdev, "caam_sm_test: decrypt cycle with 8 byte key\n");
+#endif
+
+ /* Check result */
+ if (memcmp(symout, syminp, 256)) {
+ dev_info(ksdev, "caam_sm_test: 8-byte key test mismatch\n");
+ rtnval = -1;
+ goto freekeys;
+ } else
+ dev_info(ksdev, "caam_sm_test: 8-byte key test match OK\n");
+
+ /* AES-128 encrypt using 16 byte black key */
+ black_key_aes128_dma = dma_map_single(ksdev, black_key_aes128, 16,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, black_key_aes128_dma, 16,
+ DMA_TO_DEVICE);
+ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, black_key_aes128_dma, 16, syminp_dma,
+ symint_dma, 256,
+ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, black_key_aes128_dma, 16, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "input block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[0], syminp[1], syminp[2], syminp[3],
+ syminp[4], syminp[5], syminp[6], syminp[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[8], syminp[9], syminp[10], syminp[11],
+ syminp[12], syminp[13], syminp[14], syminp[15]);
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "caam_sm_test: encrypt cycle with 16 byte key\n");
+#endif
+
+ /* AES-128 decrypt using 16 byte clear key */
+ clear_key_aes128_dma = dma_map_single(ksdev, clear_key_aes128, 16,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, clear_key_aes128_dma, 16,
+ DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, clear_key_aes128_dma, 16, symint_dma,
+ symout_dma, 256,
+ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, clear_key_aes128_dma, 16, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "decrypted block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[0], symout[1], symout[2], symout[3],
+ symout[4], symout[5], symout[6], symout[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[8], symout[9], symout[10], symout[11],
+ symout[12], symout[13], symout[14], symout[15]);
+ dev_info(ksdev, "caam_sm_test: decrypt cycle with 16 byte key\n");
+#endif
+
+ /* Check result */
+ if (memcmp(symout, syminp, 256)) {
+ dev_info(ksdev, "caam_sm_test: 16-byte key test mismatch\n");
+ rtnval = -1;
+ goto freekeys;
+ } else
+ dev_info(ksdev, "caam_sm_test: 16-byte key test match OK\n");
+
+ /* AES-256 encrypt using 32 byte black key */
+ black_key_aes256_dma = dma_map_single(ksdev, black_key_aes256, 32,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, black_key_aes256_dma, 32,
+ DMA_TO_DEVICE);
+ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, black_key_aes256_dma, 32, syminp_dma,
+ symint_dma, 256,
+ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, black_key_aes256_dma, 32, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "input block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[0], syminp[1], syminp[2], syminp[3],
+ syminp[4], syminp[5], syminp[6], syminp[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ syminp[8], syminp[9], syminp[10], syminp[11],
+ syminp[12], syminp[13], syminp[14], syminp[15]);
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "caam_sm_test: encrypt cycle with 32 byte key\n");
+#endif
+
+	/* AES-256 decrypt using 32 byte clear key */
+ clear_key_aes256_dma = dma_map_single(ksdev, clear_key_aes256, 32,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, clear_key_aes256_dma, 32,
+ DMA_TO_DEVICE);
+ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
+ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
+
+ jdescsz = mk_job_desc(jdesc, clear_key_aes256_dma, 32, symint_dma,
+ symout_dma, 256,
+ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "jobdesc:\n");
+ dev_info(ksdev, "0x%08x\n", jdesc[0]);
+ dev_info(ksdev, "0x%08x\n", jdesc[1]);
+ dev_info(ksdev, "0x%08x\n", jdesc[2]);
+ dev_info(ksdev, "0x%08x\n", jdesc[3]);
+ dev_info(ksdev, "0x%08x\n", jdesc[4]);
+ dev_info(ksdev, "0x%08x\n", jdesc[5]);
+ dev_info(ksdev, "0x%08x\n", jdesc[6]);
+ dev_info(ksdev, "0x%08x\n", jdesc[7]);
+#endif
+
+ jstat = exec_test_job(ksdev, jdesc);
+
+ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
+ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
+ dma_unmap_single(ksdev, clear_key_aes256_dma, 32, DMA_TO_DEVICE);
+
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "intermediate block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[0], symint[1], symint[2], symint[3],
+ symint[4], symint[5], symint[6], symint[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symint[8], symint[9], symint[10], symint[11],
+ symint[12], symint[13], symint[14], symint[15]);
+ dev_info(ksdev, "decrypted block:\n");
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[0], symout[1], symout[2], symout[3],
+ symout[4], symout[5], symout[6], symout[7]);
+ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
+ "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ symout[8], symout[9], symout[10], symout[11],
+ symout[12], symout[13], symout[14], symout[15]);
+ dev_info(ksdev, "caam_sm_test: decrypt cycle with 32 byte key\n");
+#endif
+
+ /* Check result */
+ if (memcmp(symout, syminp, 256)) {
+ dev_info(ksdev, "caam_sm_test: 32-byte key test mismatch\n");
+ rtnval = -1;
+ goto freekeys;
+ } else
+ dev_info(ksdev, "caam_sm_test: 32-byte key test match OK\n");
+
+ /* Remove 8/16/32 byte keys from keystore */
+freekeys:
+ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
+ if (stat)
+ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
+ keyslot_des);
+
+ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
+ if (stat)
+ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
+ keyslot_aes128);
+
+ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
+ if (stat)
+ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
+ keyslot_aes256);
+
+ /* Free resources */
+freemem:
+#ifdef SM_TEST_DETAIL
+ dev_info(ksdev, "caam_sm_test: cleaning up\n");
+#endif
+ kfree(syminp);
+ kfree(symint);
+ kfree(symout);
+ kfree(clear_key_des);
+ kfree(clear_key_aes128);
+ kfree(clear_key_aes256);
+ kfree(black_key_des);
+ kfree(black_key_aes128);
+ kfree(black_key_aes256);
+ kfree(jdesc);
+
+ /* Disconnect from keystore and leave */
+ sm_release_keystore(ksdev, unit);
+
+ return rtnval;
+}
+
+void caam_sm_example_shutdown(void)
+{
+ /* unused in present version */
+}
+
+/* Module-based initialization needs to wait for dev tree */
+#ifdef CONFIG_OF
+module_init(caam_sm_example_init);
+module_exit(caam_sm_example_shutdown);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM Keystore Usage Example");
+MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
+#endif
diff --git a/drivers/crypto/caam/snvsregs.h b/drivers/crypto/caam/snvsregs.h
new file mode 100644
index 000000000000..0ac415e2602b
--- /dev/null
+++ b/drivers/crypto/caam/snvsregs.h
@@ -0,0 +1,237 @@
+/*
+ * SNVS hardware register-level view
+ *
+ * Copyright (c) 2012 Freescale Semiconductor, Inc., All Rights Reserved
+ */
+
+#ifndef SNVSREGS_H
+#define SNVSREGS_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * SNVS High Power Domain
+ * Includes security violations, HA counter, RTC, alarm
+ */
+struct snvs_hp {
+ u32 lock;
+ u32 cmd;
+ u32 ctl;
+ u32 secvio_int_en; /* Security Violation Interrupt Enable */
+ u32 secvio_int_ctl; /* Security Violation Interrupt Control */
+ u32 status;
+ u32 secvio_status; /* Security Violation Status */
+ u32 ha_counteriv; /* High Assurance Counter IV */
+ u32 ha_counter; /* High Assurance Counter */
+ u32 rtc_msb; /* Real Time Clock/Counter MSB */
+ u32 rtc_lsb; /* Real Time Counter LSB */
+ u32 time_alarm_msb; /* Time Alarm MSB */
+ u32 time_alarm_lsb; /* Time Alarm LSB */
+};
+
+#define HP_LOCK_HAC_LCK 0x00040000
+#define HP_LOCK_HPSICR_LCK 0x00020000
+#define HP_LOCK_HPSVCR_LCK 0x00010000
+#define HP_LOCK_MKEYSEL_LCK 0x00000200
+#define HP_LOCK_TAMPCFG_LCK 0x00000100
+#define HP_LOCK_TAMPFLT_LCK 0x00000080
+#define HP_LOCK_SECVIO_LCK 0x00000040
+#define HP_LOCK_GENP_LCK 0x00000020
+#define HP_LOCK_MONOCTR_LCK 0x00000010
+#define HP_LOCK_CALIB_LCK 0x00000008
+#define HP_LOCK_SRTC_LCK 0x00000004
+#define HP_LOCK_ZMK_RD_LCK 0x00000002
+#define HP_LOCK_ZMK_WT_LCK 0x00000001
+
+#define HP_CMD_NONPRIV_AXS 0x80000000
+#define HP_CMD_HAC_STOP 0x00080000
+#define HP_CMD_HAC_CLEAR 0x00040000
+#define HP_CMD_HAC_LOAD 0x00020000
+#define HP_CMD_HAC_CFG_EN 0x00010000
+#define HP_CMD_SNVS_MSTR_KEY 0x00002000
+#define HP_CMD_PROG_ZMK 0x00001000
+#define HP_CMD_SW_LPSV 0x00000400
+#define HP_CMD_SW_FSV 0x00000200
+#define HP_CMD_SW_SV 0x00000100
+#define HP_CMD_LP_SWR_DIS 0x00000020
+#define HP_CMD_LP_SWR 0x00000010
+#define HP_CMD_SSM_SFNS_DIS 0x00000004
+#define HP_CMD_SSM_ST_DIS 0x00000002
+#define HP_CMD_SMM_ST 0x00000001
+
+#define HP_CTL_TIME_SYNC 0x00010000
+#define HP_CTL_CAL_VAL_SHIFT 10
+#define HP_CTL_CAL_VAL_MASK	(0x1f << HP_CTL_CAL_VAL_SHIFT)
+#define HP_CTL_CALIB_EN 0x00000100
+#define HP_CTL_PI_FREQ_SHIFT 4
+#define HP_CTL_PI_FREQ_MASK (0xf << HP_CTL_PI_FREQ_SHIFT)
+#define HP_CTL_PI_EN 0x00000008
+#define HP_CTL_TIMEALARM_EN 0x00000002
+#define HP_CTL_RTC_EN 0x00000001
+
+#define HP_SECVIO_INTEN_EN 0x10000000
+#define HP_SECVIO_INTEN_SRC5 0x00000020
+#define HP_SECVIO_INTEN_SRC4 0x00000010
+#define HP_SECVIO_INTEN_SRC3 0x00000008
+#define HP_SECVIO_INTEN_SRC2 0x00000004
+#define HP_SECVIO_INTEN_SRC1 0x00000002
+#define HP_SECVIO_INTEN_SRC0 0x00000001
+#define HP_SECVIO_INTEN_ALL 0x8000003f
+
+#define HP_SECVIO_ICTL_CFG_SHIFT 30
+#define HP_SECVIO_ICTL_CFG_MASK (0x3 << HP_SECVIO_ICTL_CFG_SHIFT)
+#define HP_SECVIO_ICTL_CFG5_SHIFT 5
+#define HP_SECVIO_ICTL_CFG5_MASK (0x3 << HP_SECVIO_ICTL_CFG5_SHIFT)
+#define HP_SECVIO_ICTL_CFG_DISABLE 0
+#define HP_SECVIO_ICTL_CFG_NONFATAL 1
+#define HP_SECVIO_ICTL_CFG_FATAL 2
+#define HP_SECVIO_ICTL_CFG4_FATAL 0x00000010
+#define HP_SECVIO_ICTL_CFG3_FATAL 0x00000008
+#define HP_SECVIO_ICTL_CFG2_FATAL 0x00000004
+#define HP_SECVIO_ICTL_CFG1_FATAL 0x00000002
+#define HP_SECVIO_ICTL_CFG0_FATAL 0x00000001
+
+#define HP_STATUS_ZMK_ZERO 0x80000000
+#define HP_STATUS_OTPMK_ZERO 0x08000000
+#define HP_STATUS_OTPMK_SYN_SHIFT 16
+#define HP_STATUS_OTPMK_SYN_MASK (0x1ff << HP_STATUS_OTPMK_SYN_SHIFT)
+#define HP_STATUS_SSM_ST_SHIFT 8
+#define HP_STATUS_SSM_ST_MASK (0xf << HP_STATUS_SSM_ST_SHIFT)
+#define HP_STATUS_SSM_ST_INIT 0
+#define HP_STATUS_SSM_ST_HARDFAIL 1
+#define HP_STATUS_SSM_ST_SOFTFAIL 3
+#define HP_STATUS_SSM_ST_INITINT 8
+#define HP_STATUS_SSM_ST_CHECK 9
+#define HP_STATUS_SSM_ST_NONSECURE 11
+#define HP_STATUS_SSM_ST_TRUSTED 13
+#define HP_STATUS_SSM_ST_SECURE 15
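+
+/*
+ * Illustrative sketch, not part of the original definitions: extracting
+ * the System Security Monitor state for comparison against the
+ * HP_STATUS_SSM_ST_* values above.
+ */
+static inline u32 snvs_hp_ssm_state(struct snvs_hp __iomem *hp)
+{
+ return (ioread32(&hp->status) & HP_STATUS_SSM_ST_MASK) >>
+        HP_STATUS_SSM_ST_SHIFT;
+}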
+
+#define HP_SECVIOST_ZMK_ECC_FAIL 0x08000000 /* write to clear */
+#define HP_SECVIOST_ZMK_SYN_SHIFT 16
+#define HP_SECVIOST_ZMK_SYN_MASK (0x1ff << HP_SECVIOST_ZMK_SYN_SHIFT)
+#define HP_SECVIOST_SECVIO5 0x00000020
+#define HP_SECVIOST_SECVIO4 0x00000010
+#define HP_SECVIOST_SECVIO3 0x00000008
+#define HP_SECVIOST_SECVIO2 0x00000004
+#define HP_SECVIOST_SECVIO1 0x00000002
+#define HP_SECVIOST_SECVIO0 0x00000001
+#define HP_SECVIOST_SECVIOMASK 0x0000003f
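+
+/*
+ * Illustrative sketch, not part of the original definitions: clearing the
+ * ZMK ECC failure flag, which is documented above as write-to-clear.
+ */
+static inline void snvs_hp_clear_zmk_ecc_fail(struct snvs_hp __iomem *hp)
+{
+ iowrite32(HP_SECVIOST_ZMK_ECC_FAIL, &hp->secvio_status);
+}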
+
+/*
+ * SNVS Low Power Domain
+ * Includes glitch detector, SRTC, alarm, monotonic counter, ZMK
+ */
+struct snvs_lp {
+ u32 lock;
+ u32 ctl;
+ u32 mstr_key_ctl; /* Master Key Control */
+ u32 secvio_ctl; /* Security Violation Control */
+ u32 tamper_filt_cfg; /* Tamper Glitch Filters Configuration */
+ u32 tamper_det_cfg; /* Tamper Detectors Configuration */
+ u32 status;
+ u32 srtc_msb; /* Secure Real Time Clock/Counter MSB */
+ u32 srtc_lsb; /* Secure Real Time Clock/Counter LSB */
+ u32 time_alarm; /* Time Alarm */
+ u32 smc_msb; /* Secure Monotonic Counter MSB */
+ u32 smc_lsb; /* Secure Monotonic Counter LSB */
+ u32 pwr_glitch_det; /* Power Glitch Detector */
+ u32 gen_purpose;
+ u32 zmk[8]; /* Zeroizable Master Key */
+};
+
+#define LP_LOCK_MKEYSEL_LCK 0x00000200
+#define LP_LOCK_TAMPDET_LCK 0x00000100
+#define LP_LOCK_TAMPFLT_LCK 0x00000080
+#define LP_LOCK_SECVIO_LCK 0x00000040
+#define LP_LOCK_GENP_LCK 0x00000020
+#define LP_LOCK_MONOCTR_LCK 0x00000010
+#define LP_LOCK_CALIB_LCK 0x00000008
+#define LP_LOCK_SRTC_LCK 0x00000004
+#define LP_LOCK_ZMK_RD_LCK 0x00000002
+#define LP_LOCK_ZMK_WT_LCK 0x00000001
+
+#define LP_CTL_CAL_VAL_SHIFT 10
+#define LP_CTL_CAL_VAL_MASK (0x1f << LP_CTL_CAL_VAL_SHIFT)
+#define LP_CTL_CALIB_EN 0x00000100
+#define LP_CTL_SRTC_INVAL_EN 0x00000010
+#define LP_CTL_WAKE_INT_EN 0x00000008
+#define LP_CTL_MONOCTR_EN 0x00000004
+#define LP_CTL_TIMEALARM_EN 0x00000002
+#define LP_CTL_SRTC_EN 0x00000001
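+
+/*
+ * Illustrative sketch, not part of the original definitions: starting the
+ * secure real time counter in the LP domain.
+ */
+static inline void snvs_lp_srtc_enable(struct snvs_lp __iomem *lp)
+{
+ iowrite32(ioread32(&lp->ctl) | LP_CTL_SRTC_EN, &lp->ctl);
+}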
+
+#define LP_MKEYCTL_ZMKECC_SHIFT 8
+#define LP_MKEYCTL_ZMKECC_MASK (0xff << LP_MKEYCTL_ZMKECC_SHIFT)
+#define LP_MKEYCTL_ZMKECC_EN 0x00000010
+#define LP_MKEYCTL_ZMKECC_VAL 0x00000008
+#define LP_MKEYCTL_ZMKECC_PROG 0x00000004
+#define LP_MKEYCTL_MKSEL_SHIFT 0
+#define LP_MKEYCTL_MKSEL_MASK (3 << LP_MKEYCTL_MKSEL_SHIFT)
+#define LP_MKEYCTL_MK_OTP 0
+#define LP_MKEYCTL_MK_ZMK 2
+#define LP_MKEYCTL_MK_COMB 3
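+
+/*
+ * Illustrative sketch, not part of the original definitions: selecting the
+ * master key source (LP_MKEYCTL_MK_OTP/ZMK/COMB) presented to the crypto
+ * module.
+ */
+static inline void snvs_lp_set_master_key(struct snvs_lp __iomem *lp, u32 sel)
+{
+ u32 mkc = ioread32(&lp->mstr_key_ctl);
+
+ mkc &= ~LP_MKEYCTL_MKSEL_MASK;
+ mkc |= (sel << LP_MKEYCTL_MKSEL_SHIFT) & LP_MKEYCTL_MKSEL_MASK;
+ iowrite32(mkc, &lp->mstr_key_ctl);
+}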
+
+#define LP_SECVIO_CTL_SRC5 0x20
+#define LP_SECVIO_CTL_SRC4 0x10
+#define LP_SECVIO_CTL_SRC3 0x08
+#define LP_SECVIO_CTL_SRC2 0x04
+#define LP_SECVIO_CTL_SRC1 0x02
+#define LP_SECVIO_CTL_SRC0 0x01
+
+#define LP_TAMPFILT_EXT2_EN 0x80000000
+#define LP_TAMPFILT_EXT2_SHIFT 24
+#define LP_TAMPFILT_EXT2_MASK (0x1f << LP_TAMPFILT_EXT2_SHIFT)
+#define LP_TAMPFILT_EXT1_EN 0x00800000
+#define LP_TAMPFILT_EXT1_SHIFT 16
+#define LP_TAMPFILT_EXT1_MASK (0x1f << LP_TAMPFILT_EXT1_SHIFT)
+#define LP_TAMPFILT_WM_EN 0x00000080
+#define LP_TAMPFILT_WM_SHIFT 0
+#define LP_TAMPFILT_WM_MASK (0x1f << LP_TAMPFILT_WM_SHIFT)
+
+#define LP_TAMPDET_OSC_BPS 0x10000000
+#define LP_TAMPDET_VRC_SHIFT 24
+#define LP_TAMPDET_VRC_MASK (3 << LP_TAMPDET_VRC_SHIFT)
+#define LP_TAMPDET_HTDC_SHIFT 20
+#define LP_TAMPDET_HTDC_MASK (3 << LP_TAMPDET_HTDC_SHIFT)
+#define LP_TAMPDET_LTDC_SHIFT 16
+#define LP_TAMPDET_LTDC_MASK (3 << LP_TAMPDET_LTDC_SHIFT)
+#define LP_TAMPDET_POR_OBS 0x00008000
+#define LP_TAMPDET_PFD_OBS 0x00004000
+#define LP_TAMPDET_ET2_EN 0x00000400
+#define LP_TAMPDET_ET1_EN 0x00000200
+#define LP_TAMPDET_WMT2_EN 0x00000100
+#define LP_TAMPDET_WMT1_EN 0x00000080
+#define LP_TAMPDET_VT_EN 0x00000040
+#define LP_TAMPDET_TT_EN 0x00000020
+#define LP_TAMPDET_CT_EN 0x00000010
+#define LP_TAMPDET_MCR_EN 0x00000004
+#define LP_TAMPDET_SRTCR_EN 0x00000002
+
+#define LP_STATUS_SECURE
+#define LP_STATUS_NONSECURE
+#define LP_STATUS_SCANEXIT 0x00100000 /* this and all bits below are write-1-to-clear */
+#define LP_STATUS_EXT_SECVIO 0x00010000
+#define LP_STATUS_ET2 0x00000400
+#define LP_STATUS_ET1 0x00000200
+#define LP_STATUS_WMT2 0x00000100
+#define LP_STATUS_WMT1 0x00000080
+#define LP_STATUS_VTD 0x00000040
+#define LP_STATUS_TTD 0x00000020
+#define LP_STATUS_CTD 0x00000010
+#define LP_STATUS_PGD 0x00000008
+#define LP_STATUS_MCR 0x00000004
+#define LP_STATUS_SRTCR 0x00000002
+#define LP_STATUS_LPTA 0x00000001
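+
+/*
+ * Illustrative sketch, not part of the original definitions: acknowledging
+ * latched LP status bits, which are write-1-to-clear from
+ * LP_STATUS_SCANEXIT on down.
+ */
+static inline void snvs_lp_clear_status(struct snvs_lp __iomem *lp, u32 bits)
+{
+ iowrite32(bits, &lp->status);
+}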
+
+/* Full SNVS register page, including version/options */
+struct snvs_full {
+ struct snvs_hp hp;
+ struct snvs_lp lp;
+ u32 rsvd[731]; /* deadspace 0x08c-0xbf7 */
+
+ /* Version / Revision / Option ID space - end of register page */
+ u32 vid; /* 0xbf8 HP Version ID (VID 1) */
+ u32 opt_rev; /* 0xbfc HP Options / Revision (VID 2) */
+};
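+
+/*
+ * Illustrative sketch, not part of the original definitions: reading the
+ * IP version ID word from the end of the register page.
+ */
+static inline u32 snvs_version_id(struct snvs_full __iomem *snvs)
+{
+ return ioread32(&snvs->vid);
+}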
+
+#endif /* SNVSREGS_H */