author     Tianjia Zhang <tianjia.zhang@linux.alibaba.com>   2023-02-01 20:32:07 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>          2023-02-10 17:20:19 +0800
commit     3b9d902153f31998a30b0cf4a0aeb090a05005d3 (patch)
tree       b0c9a2c7cfcd72bdc7e0d67929f47f7eb34bb95a /arch/arm64/crypto
parent     d58fa987be772a9a1e76302d80c1a1e0e15c7d8a (diff)
crypto: arm64/sm4-ccm - Rewrite skcipher walker loop
Errors in the skcipher walker API are indicated not only by a non-zero return value, but also by walk->nbytes being zero. This makes the layout of the skcipher walker loop noticeably different from the usual one, which is not a problem in itself, but it is likely to cause confusion when reading and maintaining the code.

This patch rewrites the skcipher walker loop and separates the encryption of the last chunk from the loop, avoiding incorrect calls into the skcipher walker API. Besides following the usual convention of checking walk->nbytes, it also makes the loop logic clearer and easier to understand.

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
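For reference, the "usual convention" mentioned above is a walker loop that runs while walk->nbytes is non-zero, processes whole blocks, and returns any unprocessed tail to skcipher_walk_done(), which refills walk->nbytes for the next iteration. A minimal sketch of that shape is shown below; it is illustrative only, and do_crypt() is a hypothetical stand-in for the driver's real NEON routine.

/*
 * Sketch of the conventional skcipher walker loop (illustrative only;
 * do_crypt() is hypothetical and not part of this driver).
 */
static int walk_sketch(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* Until the final step, process whole blocks only. */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, SM4_BLOCK_SIZE);

		do_crypt(walk.dst.virt.addr, walk.src.virt.addr, nbytes);

		/* Hand back the tail; the loop ends once walk.nbytes is 0. */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

The rewritten ccm_crypt() below follows this shape, except that the final chunk, which also feeds sm4_ce_ccm_final() for the authentication tag, is handled after the loop.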
Diffstat (limited to 'arch/arm64/crypto')
-rw-r--r--  arch/arm64/crypto/sm4-ce-ccm-glue.c | 44
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c
index f2cec7b52efc..5e7e17bbec81 100644
--- a/arch/arm64/crypto/sm4-ce-ccm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c
@@ -166,7 +166,7 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 					     unsigned int nbytes, u8 *mac))
 {
 	u8 __aligned(8) ctr0[SM4_BLOCK_SIZE];
-	int err;
+	int err = 0;
 
 	/* preserve the initial ctr0 for the TAG */
 	memcpy(ctr0, walk->iv, SM4_BLOCK_SIZE);
@@ -177,33 +177,37 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 	if (req->assoclen)
 		ccm_calculate_auth_mac(req, mac);
 
-	do {
+	while (walk->nbytes && walk->nbytes != walk->total) {
 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
-		const u8 *src = walk->src.virt.addr;
-		u8 *dst = walk->dst.virt.addr;
 
-		if (walk->nbytes == walk->total)
-			tail = 0;
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes - tail, mac);
+
+		kernel_neon_end();
+
+		err = skcipher_walk_done(walk, tail);
+
+		kernel_neon_begin();
+	}
 
-		if (walk->nbytes - tail)
-			sm4_ce_ccm_crypt(rkey_enc, dst, src, walk->iv,
-					 walk->nbytes - tail, mac);
+	if (walk->nbytes) {
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes, mac);
 
-		if (walk->nbytes == walk->total)
-			sm4_ce_ccm_final(rkey_enc, ctr0, mac);
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
 
 		kernel_neon_end();
 
-		if (walk->nbytes) {
-			err = skcipher_walk_done(walk, tail);
-			if (err)
-				return err;
-			if (walk->nbytes)
-				kernel_neon_begin();
-		}
-	} while (walk->nbytes > 0);
+		err = skcipher_walk_done(walk, 0);
+	} else {
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
 
-	return 0;
+		kernel_neon_end();
+	}
+
+	return err;
 }
 
 static int ccm_encrypt(struct aead_request *req)