path: root/arch/s390/crypto/aes_s390.c
author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-08-15 10:41:52 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-08-29 11:05:07 +0200
commit	0177db01adf26cf9c5dfe1feaf17087de4b9e40e (patch)
tree	a6f228b175f6094db94e80fdc4e1a37a75d2998d /arch/s390/crypto/aes_s390.c
parent	edc63a3785b48455e05793e848f0174e21f38d09 (diff)
s390/crypto: simplify return code handling
The CPACF instructions can complete with three different condition codes: CC=0 for successful completion, CC=1 if the protected key verification failed, and CC=3 for partial completion. The inline functions restart the CPACF instruction on partial completion, which removes the CC=3 case. The CC=1 case is only relevant for the protected key functions of the KM, KMC, KMAC and KMCTR instructions. As the protected key functions are not used by the current code, there is no need for any kind of return code handling.

Reviewed-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
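
For illustration, a minimal sketch of the retry pattern the commit message describes. This is not the actual arch/s390/include/asm/cpacf.h implementation; issue_km_once() is a hypothetical stand-in for the inline assembly that executes one KM operation and returns its condition code.

/*
 * Sketch only: issue_km_once() is a hypothetical helper standing in for
 * the inline assembly that executes the KM instruction once and returns
 * its condition code.
 */
static inline void sketch_cpacf_km(unsigned long func, void *param,
				   u8 *dest, const u8 *src, long len)
{
	int cc;

	do {
		cc = issue_km_once(func, param, &dest, &src, &len);
	} while (cc == 3);	/* CC=3: partial completion, restart */

	/*
	 * CC=1 (protected key verification failed) only applies to the
	 * protected key functions, which aes_s390.c does not use, and
	 * CC=0 is success, so callers have nothing left to check.
	 */
}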
Diffstat (limited to 'arch/s390/crypto/aes_s390.c')
-rw-r--r--	arch/s390/crypto/aes_s390.c	28
1 file changed, 6 insertions(+), 22 deletions(-)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 9da54698b87a..4eb8de417419 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -324,9 +324,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = cpacf_km(func, param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
+ cpacf_km(func, param, out, in, n);
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -460,9 +458,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = cpacf_kmc(func, &param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
+ cpacf_kmc(func, &param, out, in, n);
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -640,9 +636,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
/* remove decipher modifier bit from 'func' and call PCC */
- ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
- if (ret < 0)
- return -EIO;
+ cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
memcpy(xts_param.key, xts_ctx->key, 32);
memcpy(xts_param.init, pcc_param.xts, 16);
@@ -652,9 +646,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
+ cpacf_km(func, &xts_param.key[offset], out, in, n);
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -798,12 +790,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
n = __ctrblk_init(ctrptr, nbytes);
else
n = AES_BLOCK_SIZE;
- ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
- if (ret < 0 || ret != n) {
- if (ctrptr == ctrblk)
- spin_unlock(&ctrblk_lock);
- return -EIO;
- }
+ cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
if (n > AES_BLOCK_SIZE)
memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
@@ -830,10 +817,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = cpacf_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrbuf);
- if (ret < 0 || ret != AES_BLOCK_SIZE)
- return -EIO;
+ cpacf_kmctr(func, sctx->key, buf, in, AES_BLOCK_SIZE, ctrbuf);
memcpy(out, buf, nbytes);
crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
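
For context, this is roughly how the ECB walk loop in ecb_aes_crypt() reads after the patch, reconstructed from the first hunk above. The enclosing while loop and the derivation of n are not visible in the diff and are assumed here, so treat this as a sketch rather than the exact file contents.

/*
 * Sketch of the simplified ECB loop after this patch, reconstructed from
 * the first hunk above; the surrounding loop and the computation of 'n'
 * are assumptions about the unchanged parts of ecb_aes_crypt().
 */
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
	/* only use complete blocks */
	unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
	u8 *out = walk->dst.virt.addr;
	u8 *in = walk->src.virt.addr;

	/* no return code to check: the inline wrapper restarts on CC=3 */
	cpacf_km(func, param, out, in, n);

	nbytes &= AES_BLOCK_SIZE - 1;
	ret = blkcipher_walk_done(desc, walk, nbytes);
}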