Diffstat (limited to 'arch/arm64/crypto/aes-glue.c')
| -rw-r--r-- | arch/arm64/crypto/aes-glue.c | 365 |
1 file changed, 182 insertions(+), 183 deletions(-)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 30b7cc6a7079..b087b900d279 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -5,26 +5,27 @@
  * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <asm/neon.h>
-#include <asm/hwcap.h>
-#include <asm/simd.h>
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
-#include <crypto/sha2.h>
 #include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
 #include <crypto/xts.h>
+#include <linux/cpufeature.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/hwcap.h>
+#include <asm/simd.h>
 
 #include "aes-ce-setkey.h"
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 #define MODE			"ce"
 #define PRIO			300
-#define STRIDE			5
 #define aes_expandkey		ce_aes_expandkey
 #define aes_ecb_encrypt		ce_aes_ecb_encrypt
 #define aes_ecb_decrypt		ce_aes_ecb_decrypt
@@ -35,14 +36,14 @@
 #define aes_essiv_cbc_encrypt	ce_aes_essiv_cbc_encrypt
 #define aes_essiv_cbc_decrypt	ce_aes_essiv_cbc_decrypt
 #define aes_ctr_encrypt		ce_aes_ctr_encrypt
+#define aes_xctr_encrypt	ce_aes_xctr_encrypt
 #define aes_xts_encrypt		ce_aes_xts_encrypt
 #define aes_xts_decrypt		ce_aes_xts_decrypt
 #define aes_mac_update		ce_aes_mac_update
-MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 Crypto Extensions");
 #else
 #define MODE			"neon"
 #define PRIO			200
-#define STRIDE			4
 #define aes_ecb_encrypt		neon_aes_ecb_encrypt
 #define aes_ecb_decrypt		neon_aes_ecb_decrypt
 #define aes_cbc_encrypt		neon_aes_cbc_encrypt
@@ -52,16 +53,18 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
 #define aes_essiv_cbc_encrypt	neon_aes_essiv_cbc_encrypt
 #define aes_essiv_cbc_decrypt	neon_aes_essiv_cbc_decrypt
 #define aes_ctr_encrypt		neon_aes_ctr_encrypt
+#define aes_xctr_encrypt	neon_aes_xctr_encrypt
 #define aes_xts_encrypt		neon_aes_xts_encrypt
 #define aes_xts_decrypt		neon_aes_xts_decrypt
 #define aes_mac_update		neon_aes_mac_update
-MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 NEON");
 #endif
 #if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
 MODULE_ALIAS_CRYPTO("ecb(aes)");
 MODULE_ALIAS_CRYPTO("cbc(aes)");
 MODULE_ALIAS_CRYPTO("ctr(aes)");
 MODULE_ALIAS_CRYPTO("xts(aes)");
+MODULE_ALIAS_CRYPTO("xctr(aes)");
 #endif
 MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
 MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
@@ -89,7 +92,10 @@ asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
 				    int rounds, int bytes, u8 const iv[]);
 
 asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
-				int rounds, int bytes, u8 ctr[], u8 finalbuf[]);
+				int rounds, int bytes, u8 ctr[]);
+
+asmlinkage void aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
+				 int rounds, int bytes, u8 ctr[], int byte_ctr);
 
 asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
 				int rounds, int bytes, u32 const rk2[], u8 iv[],
@@ -117,7 +123,6 @@ struct crypto_aes_xts_ctx {
 struct crypto_aes_essiv_cbc_ctx {
 	struct crypto_aes_ctx key1;
 	struct crypto_aes_ctx __aligned(8) key2;
-	struct crypto_shash *hash;
 };
 
 struct mac_tfm_ctx {
@@ -126,7 +131,6 @@
 };
 
 struct mac_desc_ctx {
-	unsigned int len;
 	u8 dg[AES_BLOCK_SIZE];
 };
 
@@ -167,7 +171,7 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
 	if (ret)
 		return ret;
 
-	crypto_shash_tfm_digest(ctx->hash, in_key, key_len, digest);
+	sha256(in_key, key_len, digest);
 
 	return aes_expandkey(&ctx->key2, digest, sizeof(digest));
 }
@@ -183,10 +187,9 @@ static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-		kernel_neon_begin();
-		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, rounds, blocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, rounds, blocks);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
@@ -203,10 +206,9 @@ static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-		kernel_neon_begin();
-		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, rounds, blocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, rounds, blocks);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
@@ -221,10 +223,9 @@ static int cbc_encrypt_walk(struct skcipher_request *req,
 	unsigned int blocks;
 
 	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
-		kernel_neon_begin();
-		aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
-				ctx->key_enc, rounds, blocks, walk->iv);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
+					ctx->key_enc, rounds, blocks, walk->iv);
 		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
@@ -250,10 +251,9 @@ static int cbc_decrypt_walk(struct skcipher_request *req,
 	unsigned int blocks;
 
 	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
-		kernel_neon_begin();
-		aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
-				ctx->key_dec, rounds, blocks, walk->iv);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
+					ctx->key_dec, rounds, blocks, walk->iv);
 		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
@@ -319,10 +319,9 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
 	if (err)
 		return err;
 
-	kernel_neon_begin();
-	aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-			    ctx->key_enc, rounds, walk.nbytes, walk.iv);
-	kernel_neon_end();
+	scoped_ksimd()
+		aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				    ctx->key_enc, rounds, walk.nbytes, walk.iv);
 
 	return skcipher_walk_done(&walk, 0);
 }
@@ -376,30 +375,13 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 	if (err)
 		return err;
 
-	kernel_neon_begin();
-	aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-			    ctx->key_dec, rounds, walk.nbytes, walk.iv);
-	kernel_neon_end();
+	scoped_ksimd()
+		aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				    ctx->key_dec, rounds, walk.nbytes, walk.iv);
 
 	return skcipher_walk_done(&walk, 0);
 }
 
-static int __maybe_unused essiv_cbc_init_tfm(struct crypto_skcipher *tfm)
-{
-	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	ctx->hash = crypto_alloc_shash("sha256", 0, 0);
-
-	return PTR_ERR_OR_ZERO(ctx->hash);
-}
-
-static void __maybe_unused essiv_cbc_exit_tfm(struct crypto_skcipher *tfm)
-{
-	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_shash(ctx->hash);
-}
-
 static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -412,11 +394,11 @@ static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
 
 	blocks = walk.nbytes / AES_BLOCK_SIZE;
 	if (blocks) {
-		kernel_neon_begin();
-		aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				      ctx->key1.key_enc, rounds, blocks,
-				      req->iv, ctx->key2.key_enc);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_essiv_cbc_encrypt(walk.dst.virt.addr,
					      walk.src.virt.addr,
+					      ctx->key1.key_enc, rounds, blocks,
+					      req->iv, ctx->key2.key_enc);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err ?: cbc_encrypt_walk(req, &walk);
@@ -434,16 +416,61 @@ static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
 
 	blocks = walk.nbytes / AES_BLOCK_SIZE;
 	if (blocks) {
-		kernel_neon_begin();
-		aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				      ctx->key1.key_dec, rounds, blocks,
-				      req->iv, ctx->key2.key_enc);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_essiv_cbc_decrypt(walk.dst.virt.addr,
+					      walk.src.virt.addr,
+					      ctx->key1.key_dec, rounds, blocks,
+					      req->iv, ctx->key2.key_enc);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err ?: cbc_decrypt_walk(req, &walk);
 }
 
+static int __maybe_unused xctr_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err, rounds = 6 + ctx->key_length / 4;
+	struct skcipher_walk walk;
+	unsigned int byte_ctr = 0;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes > 0) {
+		const u8 *src = walk.src.virt.addr;
+		unsigned int nbytes = walk.nbytes;
+		u8 *dst = walk.dst.virt.addr;
+		u8 buf[AES_BLOCK_SIZE];
+
+		/*
+		 * If given less than 16 bytes, we must copy the partial block
+		 * into a temporary buffer of 16 bytes to avoid out of bounds
+		 * reads and writes.  Furthermore, this code is somewhat unusual
+		 * in that it expects the end of the data to be at the end of
+		 * the temporary buffer, rather than the start of the data at
+		 * the start of the temporary buffer.
+		 */
+		if (unlikely(nbytes < AES_BLOCK_SIZE))
+			src = dst = memcpy(buf + sizeof(buf) - nbytes,
+					   src, nbytes);
+		else if (nbytes < walk.total)
+			nbytes &= ~(AES_BLOCK_SIZE - 1);
+
+		scoped_ksimd()
+			aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
+					 walk.iv, byte_ctr);
+
+		if (unlikely(nbytes < AES_BLOCK_SIZE))
+			memcpy(walk.dst.virt.addr,
+			       buf + sizeof(buf) - nbytes, nbytes);
+		byte_ctr += nbytes;
+
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+	}
+
+	return err;
+}
+
 static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -458,26 +485,28 @@ static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
 		unsigned int nbytes = walk.nbytes;
 		u8 *dst = walk.dst.virt.addr;
 		u8 buf[AES_BLOCK_SIZE];
-		unsigned int tail;
 
+		/*
+		 * If given less than 16 bytes, we must copy the partial block
+		 * into a temporary buffer of 16 bytes to avoid out of bounds
+		 * reads and writes.  Furthermore, this code is somewhat unusual
+		 * in that it expects the end of the data to be at the end of
+		 * the temporary buffer, rather than the start of the data at
+		 * the start of the temporary buffer.
+		 */
 		if (unlikely(nbytes < AES_BLOCK_SIZE))
-			src = memcpy(buf, src, nbytes);
+			src = dst = memcpy(buf + sizeof(buf) - nbytes,
+					   src, nbytes);
 		else if (nbytes < walk.total)
 			nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-		kernel_neon_begin();
-		aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
-				walk.iv, buf);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
+					walk.iv);
 
-		tail = nbytes % (STRIDE * AES_BLOCK_SIZE);
-		if (tail > 0 && tail < AES_BLOCK_SIZE)
-			/*
-			 * The final partial block could not be returned using
-			 * an overlapping store, so it was passed via buf[]
-			 * instead.
-			 */
-			memcpy(dst + nbytes - tail, buf, tail);
+		if (unlikely(nbytes < AES_BLOCK_SIZE))
+			memcpy(walk.dst.virt.addr,
+			       buf + sizeof(buf) - nbytes, nbytes);
 
 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
@@ -526,11 +555,10 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
 		if (walk.nbytes < walk.total)
 			nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-		kernel_neon_begin();
-		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key1.key_enc, rounds, nbytes,
-				ctx->key2.key_enc, walk.iv, first);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key1.key_enc, rounds, nbytes,
+					ctx->key2.key_enc, walk.iv, first);
 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
@@ -548,11 +576,10 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
 	if (err)
 		return err;
 
-	kernel_neon_begin();
-	aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-			ctx->key1.key_enc, rounds, walk.nbytes,
-			ctx->key2.key_enc, walk.iv, first);
-	kernel_neon_end();
+	scoped_ksimd()
+		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				ctx->key1.key_enc, rounds, walk.nbytes,
+				ctx->key2.key_enc, walk.iv, first);
 
 	return skcipher_walk_done(&walk, 0);
 }
@@ -598,11 +625,10 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
 		if (walk.nbytes < walk.total)
 			nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-		kernel_neon_begin();
-		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key1.key_dec, rounds, nbytes,
-				ctx->key2.key_enc, walk.iv, first);
-		kernel_neon_end();
+		scoped_ksimd()
+			aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key1.key_dec, rounds, nbytes,
+					ctx->key2.key_enc, walk.iv, first);
 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
@@ -621,11 +647,10 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
 	if (err)
 		return err;
 
-	kernel_neon_begin();
-	aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-			ctx->key1.key_dec, rounds, walk.nbytes,
-			ctx->key2.key_enc, walk.iv, first);
-	kernel_neon_end();
+	scoped_ksimd()
+		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				ctx->key1.key_dec, rounds, walk.nbytes,
+				ctx->key2.key_enc, walk.iv, first);
 
 	return skcipher_walk_done(&walk, 0);
 }
@@ -678,6 +703,22 @@ static struct skcipher_alg aes_algs[] = { {
 	.decrypt	= ctr_encrypt,
 }, {
 	.base = {
+		.cra_name		= "xctr(aes)",
+		.cra_driver_name	= "xctr-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_module		= THIS_MODULE,
+	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= xctr_encrypt,
+	.decrypt	= xctr_encrypt,
+}, {
+	.base = {
 		.cra_name		= "xts(aes)",
 		.cra_driver_name	= "xts-aes-" MODE,
 		.cra_priority		= PRIO,
@@ -724,8 +765,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey		= essiv_cbc_set_key,
 	.encrypt	= essiv_cbc_encrypt,
 	.decrypt	= essiv_cbc_decrypt,
-	.init		= essiv_cbc_init_tfm,
-	.exit		= essiv_cbc_exit_tfm,
 } };
 
 static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
@@ -758,10 +797,9 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
 		return err;
 
 	/* encrypt the zero vector */
-	kernel_neon_begin();
-	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
-			rounds, 1);
-	kernel_neon_end();
+	scoped_ksimd()
+		aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){},
+				ctx->key.key_enc, rounds, 1);
 
 	cmac_gf128_mul_by_x(consts, consts);
 	cmac_gf128_mul_by_x(consts + 1, consts);
@@ -787,10 +825,10 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
 	if (err)
 		return err;
 
-	kernel_neon_begin();
-	aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
-	aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
-	kernel_neon_end();
+	scoped_ksimd() {
+		aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
+		aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
+	}
 
 	return cbcmac_setkey(tfm, key, sizeof(key));
 }
@@ -800,109 +838,63 @@ static int mac_init(struct shash_desc *desc)
 {
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
 
 	memset(ctx->dg, 0, AES_BLOCK_SIZE);
-	ctx->len = 0;
-
 	return 0;
 }
 
 static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
-			  u8 dg[], int enc_before, int enc_after)
+			  u8 dg[], int enc_before)
 {
 	int rounds = 6 + ctx->key_length / 4;
+	int rem;
 
-	if (crypto_simd_usable()) {
-		int rem;
-
-		do {
-			kernel_neon_begin();
+	do {
+		scoped_ksimd()
 			rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
-					     dg, enc_before, enc_after);
-			kernel_neon_end();
-			in += (blocks - rem) * AES_BLOCK_SIZE;
-			blocks = rem;
-			enc_before = 0;
-		} while (blocks);
-	} else {
-		if (enc_before)
-			aes_encrypt(ctx, dg, dg);
-
-		while (blocks--) {
-			crypto_xor(dg, in, AES_BLOCK_SIZE);
-			in += AES_BLOCK_SIZE;
-
-			if (blocks || enc_after)
-				aes_encrypt(ctx, dg, dg);
-		}
-	}
+					     dg, enc_before, !enc_before);
+		in += (blocks - rem) * AES_BLOCK_SIZE;
+		blocks = rem;
+	} while (blocks);
 }
 
 static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
+	int blocks = len / AES_BLOCK_SIZE;
 
-	while (len > 0) {
-		unsigned int l;
-
-		if ((ctx->len % AES_BLOCK_SIZE) == 0 &&
-		    (ctx->len + len) > AES_BLOCK_SIZE) {
-
-			int blocks = len / AES_BLOCK_SIZE;
-
-			len %= AES_BLOCK_SIZE;
-
-			mac_do_update(&tctx->key, p, blocks, ctx->dg,
-				      (ctx->len != 0), (len != 0));
-
-			p += blocks * AES_BLOCK_SIZE;
-
-			if (!len) {
-				ctx->len = AES_BLOCK_SIZE;
-				break;
-			}
-			ctx->len = 0;
-		}
-
-		l = min(len, AES_BLOCK_SIZE - ctx->len);
-
-		if (l <= AES_BLOCK_SIZE) {
-			crypto_xor(ctx->dg + ctx->len, p, l);
-			ctx->len += l;
-			len -= l;
-			p += l;
-		}
-	}
-
-	return 0;
+	len %= AES_BLOCK_SIZE;
+	mac_do_update(&tctx->key, p, blocks, ctx->dg, 0);
+	return len;
 }
 
-static int cbcmac_final(struct shash_desc *desc, u8 *out)
+static int cbcmac_finup(struct shash_desc *desc, const u8 *src,
+			unsigned int len, u8 *out)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
-
+	if (len) {
+		crypto_xor(ctx->dg, src, len);
+		mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1);
+	}
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
-
 	return 0;
 }
 
-static int cmac_final(struct shash_desc *desc, u8 *out)
+static int cmac_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
+		      u8 *out)
 {
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
 	u8 *consts = tctx->consts;
 
-	if (ctx->len != AES_BLOCK_SIZE) {
-		ctx->dg[ctx->len] ^= 0x80;
+	crypto_xor(ctx->dg, src, len);
+	if (len != AES_BLOCK_SIZE) {
+		ctx->dg[len] ^= 0x80;
 		consts += AES_BLOCK_SIZE;
 	}
-
-	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);
-
+	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0);
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
-
 	return 0;
 }
@@ -910,6 +902,8 @@ static struct shash_alg mac_algs[] = { {
 	.base.cra_name		= "cmac(aes)",
 	.base.cra_driver_name	= "cmac-aes-" MODE,
 	.base.cra_priority	= PRIO,
+	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
+				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
 				  2 * AES_BLOCK_SIZE,
@@ -918,13 +912,15 @@ static struct shash_alg mac_algs[] = { {
 	.digestsize		= AES_BLOCK_SIZE,
 	.init			= mac_init,
 	.update			= mac_update,
-	.final			= cmac_final,
+	.finup			= cmac_finup,
 	.setkey			= cmac_setkey,
 	.descsize		= sizeof(struct mac_desc_ctx),
 }, {
 	.base.cra_name		= "xcbc(aes)",
 	.base.cra_driver_name	= "xcbc-aes-" MODE,
 	.base.cra_priority	= PRIO,
+	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
+				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
 				  2 * AES_BLOCK_SIZE,
@@ -933,21 +929,22 @@ static struct shash_alg mac_algs[] = { {
 	.digestsize		= AES_BLOCK_SIZE,
 	.init			= mac_init,
 	.update			= mac_update,
-	.final			= cmac_final,
+	.finup			= cmac_finup,
 	.setkey			= xcbc_setkey,
 	.descsize		= sizeof(struct mac_desc_ctx),
 }, {
 	.base.cra_name		= "cbcmac(aes)",
 	.base.cra_driver_name	= "cbcmac-aes-" MODE,
 	.base.cra_priority	= PRIO,
-	.base.cra_blocksize	= 1,
+	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
 	.base.cra_module	= THIS_MODULE,
 
 	.digestsize		= AES_BLOCK_SIZE,
 	.init			= mac_init,
 	.update			= mac_update,
-	.final			= cbcmac_final,
+	.finup			= cbcmac_finup,
 	.setkey			= cbcmac_setkey,
 	.descsize		= sizeof(struct mac_desc_ctx),
 } };
@@ -979,10 +976,12 @@ unregister_ciphers:
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 module_cpu_feature_match(AES, aes_init);
+EXPORT_SYMBOL_NS(ce_aes_mac_update, "CRYPTO_INTERNAL");
 #else
 module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
 EXPORT_SYMBOL(neon_aes_cbc_encrypt);
+EXPORT_SYMBOL(neon_aes_ctr_encrypt);
 EXPORT_SYMBOL(neon_aes_xts_encrypt);
 EXPORT_SYMBOL(neon_aes_xts_decrypt);
 #endif
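For readers unfamiliar with XCTR: the mode generates keystream block i as AES(key, IV XOR i), with the counter encoded little-endian and starting at 1, which is why the glue code above only needs to carry a running byte_ctr across skcipher_walk steps. The sketch below is illustrative only and is not taken from this patch; it assumes the XCTR definition used by HCTR2, and aes_encrypt_block() is a hypothetical stand-in rather than a kernel API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical one-block AES primitive; not a kernel API. */
void aes_encrypt_block(const void *key, uint8_t dst[AES_BLOCK_SIZE],
		       const uint8_t src[AES_BLOCK_SIZE]);

/*
 * Portable reference for XCTR as defined by HCTR2: keystream block i is
 * AES(key, IV ^ le128(i)), with i starting at 1.  byte_ctr is the number
 * of bytes already processed, mirroring the parameter added in this patch.
 */
static void xctr_xor_keystream(const void *key, uint8_t *dst, const uint8_t *src,
			       size_t nbytes, const uint8_t iv[AES_BLOCK_SIZE],
			       size_t byte_ctr)
{
	uint64_t block = byte_ctr / AES_BLOCK_SIZE + 1;

	while (nbytes) {
		uint8_t ctrblk[AES_BLOCK_SIZE], ks[AES_BLOCK_SIZE];
		size_t n = nbytes < AES_BLOCK_SIZE ? nbytes : AES_BLOCK_SIZE;

		memcpy(ctrblk, iv, AES_BLOCK_SIZE);
		for (int i = 0; i < 8; i++)	/* XOR in the little-endian counter */
			ctrblk[i] ^= (uint8_t)(block >> (8 * i));

		aes_encrypt_block(key, ks, ctrblk);
		for (size_t i = 0; i < n; i++)
			dst[i] = src[i] ^ ks[i];	/* XOR keystream into the data */

		dst += n;
		src += n;
		nbytes -= n;
		block++;
	}
}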
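The cmac_finup()/consts[] handling in the diff is the standard CMAC last-block rule from RFC 4493: a full final block is XORed with subkey K1, a partial one is padded with 0x80 and XORed with K2, and those two subkeys are what cmac_setkey() stores in ctx->consts by repeatedly doubling the encrypted zero block in GF(2^128). A minimal sketch of that rule, assuming the same hypothetical aes_encrypt_block() helper as above (not code from this file):

/*
 * mac[] holds the running CBC-MAC state; last/len is the final (possibly
 * partial) message block, len <= AES_BLOCK_SIZE.
 */
static void cmac_last_block(const void *key, uint8_t mac[AES_BLOCK_SIZE],
			    const uint8_t k1[AES_BLOCK_SIZE],
			    const uint8_t k2[AES_BLOCK_SIZE],
			    const uint8_t *last, size_t len)
{
	uint8_t block[AES_BLOCK_SIZE] = { 0 };
	const uint8_t *subkey = k1;

	memcpy(block, last, len);
	if (len != AES_BLOCK_SIZE) {
		block[len] = 0x80;	/* 10* padding for a partial block */
		subkey = k2;		/* partial block selects the second subkey */
	}
	for (int i = 0; i < AES_BLOCK_SIZE; i++)
		block[i] ^= subkey[i] ^ mac[i];	/* fold into the CBC-MAC state */

	aes_encrypt_block(key, mac, block);	/* final encryption yields the tag */
}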
