Diffstat (limited to 'arch/arm/crypto/aes-neonbs-glue.c')
-rw-r--r--   arch/arm/crypto/aes-neonbs-glue.c   283
1 file changed, 129 insertions(+), 154 deletions(-)
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index c76377961444..df5afe601e4a 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -1,22 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Bit sliced AES using NEON instructions
  *
  * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <crypto/aes.h>
-#include <crypto/cbc.h>
-#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/module.h>
+#include "aes-cipher.h"
 
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
 MODULE_LICENSE("GPL v2");
 
 MODULE_ALIAS_CRYPTO("ecb(aes)");
@@ -35,12 +34,12 @@ asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                   int rounds, int blocks, u8 iv[]);
 
 asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
-                                  int rounds, int blocks, u8 ctr[], u8 final[]);
+                                  int rounds, int blocks, u8 ctr[]);
 
 asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
-                                  int rounds, int blocks, u8 iv[]);
+                                  int rounds, int blocks, u8 iv[], int);
 asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
-                                  int rounds, int blocks, u8 iv[]);
+                                  int rounds, int blocks, u8 iv[], int);
 
 struct aesbs_ctx {
         int     rounds;
@@ -49,12 +48,13 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
         struct aesbs_ctx        key;
-        struct crypto_cipher    *enc_tfm;
+        struct crypto_aes_ctx   fallback;
 };
 
 struct aesbs_xts_ctx {
         struct aesbs_ctx        key;
-        struct crypto_cipher    *tweak_tfm;
+        struct crypto_aes_ctx   fallback;
+        struct crypto_aes_ctx   tweak_key;
 };
 
 static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -64,7 +64,7 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
         struct crypto_aes_ctx rk;
         int err;
 
-        err = crypto_aes_expand_key(&rk, in_key, key_len);
+        err = aes_expandkey(&rk, in_key, key_len);
         if (err)
                 return err;
 
@@ -86,9 +86,8 @@ static int __ecb_crypt(struct skcipher_request *req,
         struct skcipher_walk walk;
         int err;
 
-        err = skcipher_walk_virt(&walk, req, true);
+        err = skcipher_walk_virt(&walk, req, false);
 
-        kernel_neon_begin();
         while (walk.nbytes >= AES_BLOCK_SIZE) {
                 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -96,12 +95,13 @@ static int __ecb_crypt(struct skcipher_request *req,
                         blocks = round_down(blocks,
                                             walk.stride / AES_BLOCK_SIZE);
 
+                kernel_neon_begin();
                 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
                    ctx->rounds, blocks);
+                kernel_neon_end();
                 err = skcipher_walk_done(&walk,
                                          walk.nbytes - blocks * AES_BLOCK_SIZE);
         }
-        kernel_neon_end();
 
         return err;
 }
@@ -120,32 +120,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                             unsigned int key_len)
 {
         struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-        struct crypto_aes_ctx rk;
         int err;
 
-        err = crypto_aes_expand_key(&rk, in_key, key_len);
+        err = aes_expandkey(&ctx->fallback, in_key, key_len);
         if (err)
                 return err;
 
         ctx->key.rounds = 6 + key_len / 4;
 
         kernel_neon_begin();
-        aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
+        aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
         kernel_neon_end();
 
-        return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
-}
-
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
-        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-        crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
+        return 0;
 }
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-        return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+        struct skcipher_walk walk;
+        unsigned int nbytes;
+        int err;
+
+        err = skcipher_walk_virt(&walk, req, false);
+
+        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+                const u8 *src = walk.src.virt.addr;
+                u8 *dst = walk.dst.virt.addr;
+                u8 *prev = walk.iv;
+
+                do {
+                        crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
+                        __aes_arm_encrypt(ctx->fallback.key_enc,
+                                          ctx->key.rounds, dst, dst);
+                        prev = dst;
+                        src += AES_BLOCK_SIZE;
+                        dst += AES_BLOCK_SIZE;
+                        nbytes -= AES_BLOCK_SIZE;
+                } while (nbytes >= AES_BLOCK_SIZE);
+                memcpy(walk.iv, prev, AES_BLOCK_SIZE);
+                err = skcipher_walk_done(&walk, nbytes);
+        }
+        return err;
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -155,9 +172,8 @@ static int cbc_decrypt(struct skcipher_request *req)
         struct skcipher_walk walk;
         int err;
 
-        err = skcipher_walk_virt(&walk, req, true);
+        err = skcipher_walk_virt(&walk, req, false);
 
-        kernel_neon_begin();
         while (walk.nbytes >= AES_BLOCK_SIZE) {
                 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -165,34 +181,18 @@ static int cbc_decrypt(struct skcipher_request *req)
                         blocks = round_down(blocks,
                                             walk.stride / AES_BLOCK_SIZE);
 
+                kernel_neon_begin();
                 aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key.rk, ctx->key.rounds, blocks,
                                   walk.iv);
+                kernel_neon_end();
                 err = skcipher_walk_done(&walk,
                                          walk.nbytes - blocks * AES_BLOCK_SIZE);
         }
-        kernel_neon_end();
 
         return err;
 }
 
-static int cbc_init(struct crypto_tfm *tfm)
-{
-        struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
-        if (IS_ERR(ctx->enc_tfm))
-                return PTR_ERR(ctx->enc_tfm);
-        return 0;
-}
-
-static void cbc_exit(struct crypto_tfm *tfm)
-{
-        struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        crypto_free_cipher(ctx->enc_tfm);
-}
-
 static int ctr_encrypt(struct skcipher_request *req)
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -201,37 +201,29 @@ static int ctr_encrypt(struct skcipher_request *req)
         u8 buf[AES_BLOCK_SIZE];
         int err;
 
-        err = skcipher_walk_virt(&walk, req, true);
+        err = skcipher_walk_virt(&walk, req, false);
 
-        kernel_neon_begin();
         while (walk.nbytes > 0) {
-                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
-                u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
-
-                if (walk.nbytes < walk.total) {
-                        blocks = round_down(blocks,
-                                            walk.stride / AES_BLOCK_SIZE);
-                        final = NULL;
-                }
+                const u8 *src = walk.src.virt.addr;
+                u8 *dst = walk.dst.virt.addr;
+                unsigned int bytes = walk.nbytes;
 
-                aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                                  ctx->rk, ctx->rounds, blocks, walk.iv, final);
+                if (unlikely(bytes < AES_BLOCK_SIZE))
+                        src = dst = memcpy(buf + sizeof(buf) - bytes,
+                                           src, bytes);
+                else if (walk.nbytes < walk.total)
+                        bytes &= ~(8 * AES_BLOCK_SIZE - 1);
 
-                if (final) {
-                        u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
-                        u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+                kernel_neon_begin();
+                aesbs_ctr_encrypt(dst, src, ctx->rk, ctx->rounds, bytes, walk.iv);
+                kernel_neon_end();
 
-                        if (dst != src)
-                                memcpy(dst, src, walk.total % AES_BLOCK_SIZE);
-                        crypto_xor(dst, final, walk.total % AES_BLOCK_SIZE);
+                if (unlikely(bytes < AES_BLOCK_SIZE))
+                        memcpy(walk.dst.virt.addr,
+                               buf + sizeof(buf) - bytes, bytes);
 
-                        err = skcipher_walk_done(&walk, 0);
-                        break;
-                }
-                err = skcipher_walk_done(&walk,
-                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
+                err = skcipher_walk_done(&walk, walk.nbytes - bytes);
         }
-        kernel_neon_end();
 
         return err;
 }
@@ -247,79 +239,106 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                 return err;
 
         key_len /= 2;
-        err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
+        err = aes_expandkey(&ctx->fallback, in_key, key_len);
+        if (err)
+                return err;
+        err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
         if (err)
                 return err;
 
         return aesbs_setkey(tfm, in_key, key_len);
 }
 
-static int xts_init(struct crypto_tfm *tfm)
-{
-        struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
-        if (IS_ERR(ctx->tweak_tfm))
-                return PTR_ERR(ctx->tweak_tfm);
-        return 0;
-}
-
-static void xts_exit(struct crypto_tfm *tfm)
-{
-        struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        crypto_free_cipher(ctx->tweak_tfm);
-}
-
-static int __xts_crypt(struct skcipher_request *req,
+static int __xts_crypt(struct skcipher_request *req, bool encrypt,
                        void (*fn)(u8 out[], u8 const in[], u8 const rk[],
-                                  int rounds, int blocks, u8 iv[]))
+                                  int rounds, int blocks, u8 iv[], int))
 {
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+        const int rounds = ctx->key.rounds;
+        int tail = req->cryptlen % AES_BLOCK_SIZE;
+        struct skcipher_request subreq;
+        u8 buf[2 * AES_BLOCK_SIZE];
         struct skcipher_walk walk;
         int err;
 
+        if (req->cryptlen < AES_BLOCK_SIZE)
+                return -EINVAL;
+
+        if (unlikely(tail)) {
+                skcipher_request_set_tfm(&subreq, tfm);
+                skcipher_request_set_callback(&subreq,
+                                              skcipher_request_flags(req),
+                                              NULL, NULL);
+                skcipher_request_set_crypt(&subreq, req->src, req->dst,
+                                           req->cryptlen - tail, req->iv);
+                req = &subreq;
+        }
+
         err = skcipher_walk_virt(&walk, req, true);
+        if (err)
+                return err;
 
-        crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+        __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
 
-        kernel_neon_begin();
         while (walk.nbytes >= AES_BLOCK_SIZE) {
                 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
+                int reorder_last_tweak = !encrypt && tail > 0;
 
-                if (walk.nbytes < walk.total)
+                if (walk.nbytes < walk.total) {
                         blocks = round_down(blocks,
                                             walk.stride / AES_BLOCK_SIZE);
+                        reorder_last_tweak = 0;
+                }
 
+                kernel_neon_begin();
                 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
-                   ctx->key.rounds, blocks, walk.iv);
+                   rounds, blocks, walk.iv, reorder_last_tweak);
+                kernel_neon_end();
                 err = skcipher_walk_done(&walk,
                                          walk.nbytes - blocks * AES_BLOCK_SIZE);
         }
-        kernel_neon_end();
 
-        return err;
+        if (err || likely(!tail))
+                return err;
+
+        /* handle ciphertext stealing */
+        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
+                                 AES_BLOCK_SIZE, 0);
+        memcpy(buf + AES_BLOCK_SIZE, buf, tail);
+        scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);
+
+        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
+
+        if (encrypt)
+                __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
+        else
+                __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
+
+        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
+
+        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
+                                 AES_BLOCK_SIZE + tail, 1);
+        return 0;
 }
 
 static int xts_encrypt(struct skcipher_request *req)
 {
-        return __xts_crypt(req, aesbs_xts_encrypt);
+        return __xts_crypt(req, true, aesbs_xts_encrypt);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
 {
-        return __xts_crypt(req, aesbs_xts_decrypt);
+        return __xts_crypt(req, false, aesbs_xts_decrypt);
 }
 
 static struct skcipher_alg aes_algs[] = { {
-        .base.cra_name          = "__ecb(aes)",
-        .base.cra_driver_name   = "__ecb-aes-neonbs",
+        .base.cra_name          = "ecb(aes)",
+        .base.cra_driver_name   = "ecb-aes-neonbs",
         .base.cra_priority      = 250,
         .base.cra_blocksize     = AES_BLOCK_SIZE,
         .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
         .base.cra_module        = THIS_MODULE,
-        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
 
         .min_keysize            = AES_MIN_KEY_SIZE,
         .max_keysize            = AES_MAX_KEY_SIZE,
@@ -328,15 +347,12 @@ static struct skcipher_alg aes_algs[] = { {
         .encrypt                = ecb_encrypt,
         .decrypt                = ecb_decrypt,
 }, {
-        .base.cra_name          = "__cbc(aes)",
-        .base.cra_driver_name   = "__cbc-aes-neonbs",
+        .base.cra_name          = "cbc(aes)",
+        .base.cra_driver_name   = "cbc-aes-neonbs",
         .base.cra_priority      = 250,
         .base.cra_blocksize     = AES_BLOCK_SIZE,
         .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
         .base.cra_module        = THIS_MODULE,
-        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
-        .base.cra_init          = cbc_init,
-        .base.cra_exit          = cbc_exit,
 
         .min_keysize            = AES_MIN_KEY_SIZE,
         .max_keysize            = AES_MAX_KEY_SIZE,
@@ -346,13 +362,12 @@ static struct skcipher_alg aes_algs[] = { {
         .encrypt                = cbc_encrypt,
         .decrypt                = cbc_decrypt,
 }, {
-        .base.cra_name          = "__ctr(aes)",
-        .base.cra_driver_name   = "__ctr-aes-neonbs",
+        .base.cra_name          = "ctr(aes)",
+        .base.cra_driver_name   = "ctr-aes-neonbs",
         .base.cra_priority      = 250,
         .base.cra_blocksize     = 1,
         .base.cra_ctxsize       = sizeof(struct aesbs_ctx),
         .base.cra_module        = THIS_MODULE,
-        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
 
         .min_keysize            = AES_MIN_KEY_SIZE,
         .max_keysize            = AES_MAX_KEY_SIZE,
@@ -363,15 +378,12 @@ static struct skcipher_alg aes_algs[] = { {
         .encrypt                = ctr_encrypt,
         .decrypt                = ctr_encrypt,
 }, {
-        .base.cra_name          = "__xts(aes)",
-        .base.cra_driver_name   = "__xts-aes-neonbs",
+        .base.cra_name          = "xts(aes)",
+        .base.cra_driver_name   = "xts-aes-neonbs",
         .base.cra_priority      = 250,
         .base.cra_blocksize     = AES_BLOCK_SIZE,
         .base.cra_ctxsize       = sizeof(struct aesbs_xts_ctx),
         .base.cra_module        = THIS_MODULE,
-        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
-        .base.cra_init          = xts_init,
-        .base.cra_exit          = xts_exit,
 
         .min_keysize            = 2 * AES_MIN_KEY_SIZE,
         .max_keysize            = 2 * AES_MAX_KEY_SIZE,
@@ -382,55 +394,18 @@ static struct skcipher_alg aes_algs[] = { {
         .decrypt                = xts_decrypt,
 } };
 
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
 static void aes_exit(void)
 {
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
-                if (aes_simd_algs[i])
-                        simd_skcipher_free(aes_simd_algs[i]);
-
         crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
 }
 
 static int __init aes_init(void)
 {
-        struct simd_skcipher_alg *simd;
-        const char *basename;
-        const char *algname;
-        const char *drvname;
-        int err;
-        int i;
-
         if (!(elf_hwcap & HWCAP_NEON))
                 return -ENODEV;
 
-        err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
-        if (err)
-                return err;
-
-        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
-                if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
-                        continue;
-
-                algname = aes_algs[i].base.cra_name + 2;
-                drvname = aes_algs[i].base.cra_driver_name + 2;
-                basename = aes_algs[i].base.cra_driver_name;
-                simd = simd_skcipher_create_compat(algname, drvname, basename);
-                err = PTR_ERR(simd);
-                if (IS_ERR(simd))
-                        goto unregister_simds;
-
-                aes_simd_algs[i] = simd;
-        }
-
-        return 0;
-
-unregister_simds:
-        aes_exit();
-        return err;
+        return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
 }
 
-late_initcall(aes_init);
+module_init(aes_init);
 module_exit(aes_exit);
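
Note on the reworked cbc_encrypt(): CBC encryption is inherently serial (each block's input depends on the previous ciphertext block), so the diff drops the crypto_cipher fallback tfm and chains blocks directly with crypto_xor_cpy() plus the scalar __aes_arm_encrypt() helper, carrying the running IV in prev. Below is a minimal user-space model of that loop; block_encrypt() is a toy stand-in for the scalar AES call and cbc_encrypt_model() is a name invented here, so this is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Placeholder single-block cipher: NOT AES, just enough to run the model. */
static void block_encrypt(uint8_t out[BLOCK_SIZE], const uint8_t in[BLOCK_SIZE])
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                out[i] = (uint8_t)((in[i] ^ 0xa5) + i);
}

/* xor-then-encrypt, chaining the previous ciphertext block as the IV */
static void cbc_encrypt_model(uint8_t *dst, const uint8_t *src,
                              size_t nblocks, uint8_t iv[BLOCK_SIZE])
{
        const uint8_t *prev = iv;

        while (nblocks--) {
                for (int i = 0; i < BLOCK_SIZE; i++)    /* crypto_xor_cpy() */
                        dst[i] = src[i] ^ prev[i];
                block_encrypt(dst, dst);                /* encrypt in place */
                prev = dst;                             /* chain ciphertext */
                src += BLOCK_SIZE;
                dst += BLOCK_SIZE;
        }
        memcpy(iv, prev, BLOCK_SIZE);   /* save the running IV, as the driver does */
}

int main(void)
{
        uint8_t iv[BLOCK_SIZE] = { 0 };
        uint8_t pt[2 * BLOCK_SIZE], ct[2 * BLOCK_SIZE];

        for (int i = 0; i < (int)sizeof(pt); i++)
                pt[i] = (uint8_t)i;     /* dummy plaintext */

        cbc_encrypt_model(ct, pt, 2, iv);
        for (size_t i = 0; i < sizeof(ct); i++)
                printf("%02x", ct[i]);
        putchar('\n');
        return 0;
}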
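Note on ctr_encrypt(): aesbs_ctr_encrypt() now takes a byte count instead of a block count. Intermediate walk steps are rounded down to a multiple of eight blocks (bytes &= ~(8 * AES_BLOCK_SIZE - 1), the bit-slicing width), and a tail shorter than one block is bounced through a right-aligned stack buffer so the assembly always has a full 16-byte block of addressable memory ending at src + bytes. The sketch below only mimics that glue-level data movement: ctr_crypt() is a toy keystream invented for the model, not AES-CTR and not the NEON routine's real contract.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Toy keystream cipher: NOT AES-CTR, just enough to make the model run. */
static void ctr_crypt(uint8_t *dst, const uint8_t *src, size_t bytes,
                      uint8_t ctr[BLOCK_SIZE])
{
        for (size_t i = 0; i < bytes; i++) {
                dst[i] = src[i] ^ (uint8_t)(ctr[BLOCK_SIZE - 1] * 167 + i % BLOCK_SIZE);
                if ((i + 1) % BLOCK_SIZE == 0)
                        ctr[BLOCK_SIZE - 1]++;  /* crude counter bump */
        }
}

int main(void)
{
        uint8_t ctr[BLOCK_SIZE] = { 0 };
        uint8_t buf[BLOCK_SIZE];
        uint8_t msg[21], out[21];
        size_t tail = sizeof(msg) % BLOCK_SIZE;

        for (size_t i = 0; i < sizeof(msg); i++)
                msg[i] = (uint8_t)(0x20 + i);   /* dummy plaintext */

        /* whole blocks are handed to the block routine directly */
        ctr_crypt(out, msg, sizeof(msg) - tail, ctr);

        /*
         * The sub-block tail is right-aligned into buf, processed in place,
         * and copied back out -- the same shuffle ctr_encrypt() performs.
         */
        memcpy(buf + sizeof(buf) - tail, msg + sizeof(msg) - tail, tail);
        ctr_crypt(buf + sizeof(buf) - tail, buf + sizeof(buf) - tail, tail, ctr);
        memcpy(out + sizeof(out) - tail, buf + sizeof(buf) - tail, tail);

        for (size_t i = 0; i < sizeof(out); i++)
                printf("%02x", out[i]);
        putchar('\n');
        return 0;
}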
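Note on the XTS ciphertext-stealing tail: the full blocks go through the NEON code (with the last two tweaks swapped on decryption via the new reorder_last_tweak argument), then the final partial block is assembled in a two-block bounce buffer: the last full ciphertext block donates its leading tail bytes to the output, the plaintext tail completes one more full block, and that block is run through the xor-encrypt-xor fallback. A standalone model of the encrypt-side shuffle follows; block_encrypt(), xor_block() and cts_encrypt_tail() are stub names invented here in place of the AES fallback and scatterlist copies.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Stub one-block cipher standing in for the scalar AES fallback. */
static void block_encrypt(uint8_t b[BLOCK_SIZE])
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                b[i] = (uint8_t)((b[i] ^ 0x5c) + 1);
}

static void xor_block(uint8_t *a, const uint8_t *b)
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                a[i] ^= b[i];
}

/*
 * Encrypt-side ciphertext stealing: 'ct' holds the ciphertext of the full
 * blocks (ctlen bytes, with room for 'tail' more), 'ptail' the leftover
 * plaintext, 'tweak' the tweak for the stolen block. Mirrors the buf[]
 * shuffle at the end of __xts_crypt() above.
 */
static void cts_encrypt_tail(uint8_t *ct, size_t ctlen,
                             const uint8_t *ptail, size_t tail,
                             const uint8_t tweak[BLOCK_SIZE])
{
        uint8_t buf[2 * BLOCK_SIZE];

        /* grab the last full ciphertext block */
        memcpy(buf, ct + ctlen - BLOCK_SIZE, BLOCK_SIZE);
        /* its first 'tail' bytes become the final, partial ciphertext block */
        memcpy(buf + BLOCK_SIZE, buf, tail);
        /* the plaintext tail fills the hole, completing one more full block */
        memcpy(buf, ptail, tail);

        /* standard XTS: xor-encrypt-xor with the tweak */
        xor_block(buf, tweak);
        block_encrypt(buf);
        xor_block(buf, tweak);

        /* write back the new last block plus the stolen partial block */
        memcpy(ct + ctlen - BLOCK_SIZE, buf, BLOCK_SIZE + tail);
}

int main(void)
{
        uint8_t tweak[BLOCK_SIZE] = { 1 };      /* dummy tweak for the last block */
        uint8_t ct[BLOCK_SIZE + 4];             /* one full block + room for the tail */
        const uint8_t ptail[4] = { 'h', 'i', '!', '!' };

        for (int i = 0; i < BLOCK_SIZE; i++)
                ct[i] = (uint8_t)(0xc0 + i);    /* pretend full-block ciphertext */

        cts_encrypt_tail(ct, BLOCK_SIZE, ptail, sizeof(ptail), tweak);

        for (size_t i = 0; i < sizeof(ct); i++)
                printf("%02x", ct[i]);
        putchar('\n');
        return 0;
}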
