Diffstat (limited to 'arch/arm64/crypto/sm4-ce-glue.c')
-rw-r--r--    arch/arm64/crypto/sm4-ce-glue.c    412
1 file changed, 124 insertions(+), 288 deletions(-)
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0a2d32ed3bde..5569cece5a0b 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -8,19 +8,18 @@
* Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/cpufeature.h>
-#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/b128ops.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
-#include <crypto/xts.h>
#include <crypto/sm4.h>
+#include <crypto/utils.h>
+#include <crypto/xts.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
@@ -37,10 +36,6 @@ asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nbytes);
-asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
-asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src,
@@ -56,7 +51,6 @@ asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest,
EXPORT_SYMBOL(sm4_ce_expand_key);
EXPORT_SYMBOL(sm4_ce_crypt_block);
EXPORT_SYMBOL(sm4_ce_cbc_enc);
-EXPORT_SYMBOL(sm4_ce_cfb_enc);
struct sm4_xts_ctx {
struct sm4_ctx key1;
@@ -69,7 +63,6 @@ struct sm4_mac_tfm_ctx {
};
struct sm4_mac_desc_ctx {
- unsigned int len;
u8 digest[SM4_BLOCK_SIZE];
};
@@ -81,10 +74,9 @@ static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
- kernel_neon_begin();
- sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
- crypto_sm4_fk, crypto_sm4_ck);
- kernel_neon_end();
+ scoped_ksimd()
+ sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
return 0;
}
@@ -101,12 +93,12 @@ static int sm4_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
return ret;
- kernel_neon_begin();
- sm4_ce_expand_key(key, ctx->key1.rkey_enc,
- ctx->key1.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
- sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc,
- ctx->key2.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
- kernel_neon_end();
+ scoped_ksimd() {
+ sm4_ce_expand_key(key, ctx->key1.rkey_enc,
+ ctx->key1.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
+ sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc,
+ ctx->key2.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
+ }
return 0;
}
@@ -124,16 +116,14 @@ static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
- kernel_neon_begin();
-
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_crypt(rkey, dst, src, nblks);
- nbytes -= nblks * SM4_BLOCK_SIZE;
+ scoped_ksimd() {
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_crypt(rkey, dst, src, nblks);
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
}
- kernel_neon_end();
-
err = skcipher_walk_done(&walk, nbytes);
}
@@ -174,16 +164,14 @@ static int sm4_cbc_crypt(struct skcipher_request *req,
nblocks = nbytes / SM4_BLOCK_SIZE;
if (nblocks) {
- kernel_neon_begin();
-
- if (encrypt)
- sm4_ce_cbc_enc(ctx->rkey_enc, dst, src,
- walk.iv, nblocks);
- else
- sm4_ce_cbc_dec(ctx->rkey_dec, dst, src,
- walk.iv, nblocks);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (encrypt)
+ sm4_ce_cbc_enc(ctx->rkey_enc, dst, src,
+ walk.iv, nblocks);
+ else
+ sm4_ce_cbc_dec(ctx->rkey_dec, dst, src,
+ walk.iv, nblocks);
+ }
}
err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
@@ -256,16 +244,14 @@ static int sm4_cbc_cts_crypt(struct skcipher_request *req, bool encrypt)
if (err)
return err;
- kernel_neon_begin();
-
- if (encrypt)
- sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, walk.nbytes);
- else
- sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, walk.nbytes);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (encrypt)
+ sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes);
+ else
+ sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes);
+ }
return skcipher_walk_done(&walk, 0);
}
@@ -280,90 +266,6 @@ static int sm4_cbc_cts_decrypt(struct skcipher_request *req)
return sm4_cbc_cts_crypt(req, false);
}
-static int sm4_cfb_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
-
- kernel_neon_begin();
-
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
-
- /* tail */
- if (walk.nbytes == walk.total && nbytes > 0) {
- u8 keystream[SM4_BLOCK_SIZE];
-
- sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
- crypto_xor_cpy(dst, src, keystream, nbytes);
- nbytes = 0;
- }
-
- kernel_neon_end();
-
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int sm4_cfb_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
-
- kernel_neon_begin();
-
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
-
- /* tail */
- if (walk.nbytes == walk.total && nbytes > 0) {
- u8 keystream[SM4_BLOCK_SIZE];
-
- sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
- crypto_xor_cpy(dst, src, keystream, nbytes);
- nbytes = 0;
- }
-
- kernel_neon_end();
-
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
static int sm4_ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -379,28 +281,26 @@ static int sm4_ctr_crypt(struct skcipher_request *req)
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
- kernel_neon_begin();
-
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
-
- /* tail */
- if (walk.nbytes == walk.total && nbytes > 0) {
- u8 keystream[SM4_BLOCK_SIZE];
-
- sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
- crypto_inc(walk.iv, SM4_BLOCK_SIZE);
- crypto_xor_cpy(dst, src, keystream, nbytes);
- nbytes = 0;
+ scoped_ksimd() {
+ nblks = BYTES2BLKS(nbytes);
+ if (nblks) {
+ sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
+ dst += nblks * SM4_BLOCK_SIZE;
+ src += nblks * SM4_BLOCK_SIZE;
+ nbytes -= nblks * SM4_BLOCK_SIZE;
+ }
+
+ /* tail */
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[SM4_BLOCK_SIZE];
+
+ sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+ crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ nbytes = 0;
+ }
}
- kernel_neon_end();
-
err = skcipher_walk_done(&walk, nbytes);
}
@@ -450,18 +350,16 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
if (nbytes < walk.total)
nbytes &= ~(SM4_BLOCK_SIZE - 1);
- kernel_neon_begin();
-
- if (encrypt)
- sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, nbytes,
- rkey2_enc);
- else
- sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, nbytes,
- rkey2_enc);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (encrypt)
+ sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, nbytes,
+ rkey2_enc);
+ else
+ sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, nbytes,
+ rkey2_enc);
+ }
rkey2_enc = NULL;
@@ -486,18 +384,16 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
if (err)
return err;
- kernel_neon_begin();
-
- if (encrypt)
- sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, walk.nbytes,
- rkey2_enc);
- else
- sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
- walk.src.virt.addr, walk.iv, walk.nbytes,
- rkey2_enc);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (encrypt)
+ sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes,
+ rkey2_enc);
+ else
+ sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes,
+ rkey2_enc);
+ }
return skcipher_walk_done(&walk, 0);
}
@@ -544,22 +440,6 @@ static struct skcipher_alg sm4_algs[] = {
.decrypt = sm4_cbc_decrypt,
}, {
.base = {
- .cra_name = "cfb(sm4)",
- .cra_driver_name = "cfb-sm4-ce",
- .cra_priority = 400,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct sm4_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .chunksize = SM4_BLOCK_SIZE,
- .setkey = sm4_setkey,
- .encrypt = sm4_cfb_encrypt,
- .decrypt = sm4_cfb_decrypt,
- }, {
- .base = {
.cra_name = "ctr(sm4)",
.cra_driver_name = "ctr-sm4-ce",
.cra_priority = 400,
@@ -617,11 +497,9 @@ static int sm4_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key,
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
- kernel_neon_begin();
- sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
- crypto_sm4_fk, crypto_sm4_ck);
- kernel_neon_end();
-
+ scoped_ksimd()
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
return 0;
}
@@ -637,15 +515,13 @@ static int sm4_cmac_setkey(struct crypto_shash *tfm, const u8 *key,
memset(consts, 0, SM4_BLOCK_SIZE);
- kernel_neon_begin();
+ scoped_ksimd() {
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
- sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
- crypto_sm4_fk, crypto_sm4_ck);
-
- /* encrypt the zero block */
- sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts, (const u8 *)consts);
-
- kernel_neon_end();
+ /* encrypt the zero block */
+ sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts, (const u8 *)consts);
+ }
/* gf(2^128) multiply zero-ciphertext with u and u^2 */
a = be64_to_cpu(consts[0].a);
@@ -675,18 +551,16 @@ static int sm4_xcbc_setkey(struct crypto_shash *tfm, const u8 *key,
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
- kernel_neon_begin();
-
- sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
- crypto_sm4_fk, crypto_sm4_ck);
+ scoped_ksimd() {
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
- sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]);
- sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2);
+ sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]);
+ sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2);
- sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec,
- crypto_sm4_fk, crypto_sm4_ck);
-
- kernel_neon_end();
+ sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+ }
return 0;
}
@@ -696,8 +570,6 @@ static int sm4_mac_init(struct shash_desc *desc)
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
memset(ctx->digest, 0, SM4_BLOCK_SIZE);
- ctx->len = 0;
-
return 0;
}
@@ -706,87 +578,47 @@ static int sm4_mac_update(struct shash_desc *desc, const u8 *p,
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int l, nblocks;
-
- if (len == 0)
- return 0;
-
- if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) {
- l = min(len, SM4_BLOCK_SIZE - ctx->len);
-
- crypto_xor(ctx->digest + ctx->len, p, l);
- ctx->len += l;
- len -= l;
- p += l;
- }
-
- if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) {
- kernel_neon_begin();
+ unsigned int nblocks = len / SM4_BLOCK_SIZE;
- if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) {
- sm4_ce_crypt_block(tctx->key.rkey_enc,
- ctx->digest, ctx->digest);
- ctx->len = 0;
- } else {
- nblocks = len / SM4_BLOCK_SIZE;
- len %= SM4_BLOCK_SIZE;
-
- sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
- nblocks, (ctx->len == SM4_BLOCK_SIZE),
- (len != 0));
-
- p += nblocks * SM4_BLOCK_SIZE;
-
- if (len == 0)
- ctx->len = SM4_BLOCK_SIZE;
- }
-
- kernel_neon_end();
-
- if (len) {
- crypto_xor(ctx->digest, p, len);
- ctx->len = len;
- }
- }
-
- return 0;
+ len %= SM4_BLOCK_SIZE;
+ scoped_ksimd()
+ sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
+ nblocks, false, true);
+ return len;
}
-static int sm4_cmac_final(struct shash_desc *desc, u8 *out)
+static int sm4_cmac_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
const u8 *consts = tctx->consts;
- if (ctx->len != SM4_BLOCK_SIZE) {
- ctx->digest[ctx->len] ^= 0x80;
+ crypto_xor(ctx->digest, src, len);
+ if (len != SM4_BLOCK_SIZE) {
+ ctx->digest[len] ^= 0x80;
consts += SM4_BLOCK_SIZE;
}
-
- kernel_neon_begin();
- sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
- false, true);
- kernel_neon_end();
-
+ scoped_ksimd()
+ sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
+ false, true);
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
-
return 0;
}
-static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out)
+static int sm4_cbcmac_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
- if (ctx->len) {
- kernel_neon_begin();
- sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
- ctx->digest);
- kernel_neon_end();
+ if (len) {
+ crypto_xor(ctx->digest, src, len);
+ scoped_ksimd()
+ sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
+ ctx->digest);
}
-
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
-
return 0;
}
@@ -796,6 +628,8 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "cmac(sm4)",
.cra_driver_name = "cmac-sm4-ce",
.cra_priority = 400,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
@@ -804,7 +638,7 @@ static struct shash_alg sm4_mac_algs[] = {
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cmac_final,
+ .finup = sm4_cmac_finup,
.setkey = sm4_cmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
@@ -812,6 +646,8 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "xcbc(sm4)",
.cra_driver_name = "xcbc-sm4-ce",
.cra_priority = 400,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
@@ -820,7 +656,7 @@ static struct shash_alg sm4_mac_algs[] = {
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cmac_final,
+ .finup = sm4_cmac_finup,
.setkey = sm4_xcbc_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
@@ -828,14 +664,15 @@ static struct shash_alg sm4_mac_algs[] = {
.cra_name = "cbcmac(sm4)",
.cra_driver_name = "cbcmac-sm4-ce",
.cra_priority = 400,
- .cra_blocksize = 1,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
+ .cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx),
.cra_module = THIS_MODULE,
},
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
- .final = sm4_cbcmac_final,
+ .finup = sm4_cbcmac_finup,
.setkey = sm4_cbcmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}
@@ -869,12 +706,11 @@ static void __exit sm4_exit(void)
module_cpu_feature_match(SM4, sm4_init);
module_exit(sm4_exit);
-MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions");
+MODULE_DESCRIPTION("SM4 ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("sm4-ce");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
-MODULE_ALIAS_CRYPTO("cfb(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
MODULE_ALIAS_CRYPTO("cts(cbc(sm4))");
MODULE_ALIAS_CRYPTO("xts(sm4)");